@promptbook/node 0.94.0-7 → 0.95.0

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their respective public registries.
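For reference, an equivalent comparison can typically be generated locally with the npm CLI (the `npm diff` command, available since npm 7); the two package specs below are the versions compared on this page, and the local output is a plain unified diff rather than the rendered view shown here.

```bash
# Compare the two published versions of @promptbook/node locally
npm diff --diff=@promptbook/node@0.94.0-7 --diff=@promptbook/node@0.95.0
```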
Files changed (61)
  1. package/README.md +8 -21
  2. package/esm/index.es.js +44 -44
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/types.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
  6. package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
  7. package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
  8. package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
  10. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
  11. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  12. package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
  13. package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
  14. package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
  15. package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
  28. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  30. package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
  31. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
  32. package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
  33. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
  34. package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
  35. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
  36. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
  37. package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
  38. package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
  39. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
  40. package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
  41. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
  42. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
  43. package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
  44. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
  45. package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
  46. package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
  47. package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
  48. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  49. package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
  50. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
  51. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  52. package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
  53. package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
  54. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  55. package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
  56. package/esm/typings/src/version.d.ts +1 -1
  57. package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
  58. package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
  59. package/package.json +25 -14
  60. package/umd/index.umd.js +44 -44
  61. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -25,10 +25,6 @@ Write AI applications using plain human language across multiple models and plat
25
25
 
26
26
 
27
27
 
28
- <blockquote style="color: #ff8811">
29
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
30
- </blockquote>
31
-
32
28
  ## 📦 Package `@promptbook/node`
33
29
 
34
30
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -60,17 +56,15 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
60
56
 
61
57
  ## 🤍 The Book Abstract
62
58
 
63
- **It's time for a paradigm shift! The future of software is in plain English, French or Latin.**
59
+ **It's time for a paradigm shift! The future of software is written in plain English, French, or Latin.**
64
60
 
65
61
  During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
66
62
 
67
-
68
-
69
63
  It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
70
64
 
71
65
  The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
72
66
 
73
- This shift is going to happen, whether we are ready for it or not. Our mission is to make it excellently, not just good.
67
+ This shift will happen whether we're ready or not. Our mission is to make it excellent, not just good.
74
68
 
75
69
  **Join us in this journey!**
76
70
 
@@ -191,9 +185,6 @@ Join our growing community of developers and users:
191
185
 
192
186
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
193
187
 
194
-
195
-
196
-
197
188
  ### Introduction
198
189
 
199
190
  Book is a Markdown-based language that simplifies the creation of AI applications, workflows, and automations. With human-readable commands, you can define inputs, outputs, personas, knowledge sources, and actions—without needing model-specific details.
@@ -221,7 +212,7 @@ Book is a Markdown-based language that simplifies the creation of AI application
221
212
  → {article}
222
213
  ```
223
214
 
224
- Each part of the book defines one of 3 circles:
215
+ Each part of the book defines one of three circles:
225
216
 
226
217
  ### **1. What:** Workflows, Tasks and Parameters
227
218
 
@@ -243,8 +234,6 @@ Personas can have access to different knowledge, tools and actions. They can als
243
234
 
244
235
  - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
245
236
 
246
-
247
-
248
237
  ### **3. How:** Knowledge, Instruments and Actions
249
238
 
250
239
  The resources used by the personas are used to do the work.
@@ -259,9 +248,9 @@ The resources used by the personas are used to do the work.
259
248
 
260
249
  Book language is based on markdown. It is subset of markdown. It is designed to be easy to read and write. It is designed to be understandable by both humans and machines and without specific knowledge of the language.
261
250
 
262
- The file has `.book` extension. It uses `UTF-8` non BOM encoding.
251
+ The file has a `.book` extension and uses UTF-8 encoding without BOM.
263
252
 
264
- Book has two variants: flat - which is just a prompt with no structure, and full - which has a structure with tasks, commands and prompts.
253
+ Books have two variants: flat just a prompt without structure, and full with tasks, commands, and prompts.
265
254
 
266
255
  As it is source code, it can leverage all the features of version control systems like git and does not suffer from the problems of binary formats, proprietary formats, or no-code solutions.
267
256
 
@@ -292,13 +281,13 @@ Or you can install them separately:
292
281
 
293
282
  - ⭐ **[ptbk](https://www.npmjs.com/package/ptbk)** - Bundle of all packages, when you want to install everything and you don't care about the size
294
283
  - **[promptbook](https://www.npmjs.com/package/promptbook)** - Same as `ptbk`
295
- - ⭐🧙‍♂️ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizzard to just run the books in node without any struggle
284
+ - ⭐🧙‍♂️ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard to just run the books in node without any struggle
296
285
  - **[@promptbook/core](https://www.npmjs.com/package/@promptbook/core)** - Core of the library, it contains the main logic for promptbooks
297
286
  - **[@promptbook/node](https://www.npmjs.com/package/@promptbook/node)** - Core of the library for Node.js environment
298
287
  - **[@promptbook/browser](https://www.npmjs.com/package/@promptbook/browser)** - Core of the library for browser environment
299
288
  - ⭐ **[@promptbook/utils](https://www.npmjs.com/package/@promptbook/utils)** - Utility functions used in the library but also useful for individual use in preprocessing and postprocessing LLM inputs and outputs
300
289
  - **[@promptbook/markdown-utils](https://www.npmjs.com/package/@promptbook/markdown-utils)** - Utility functions used for processing markdown
301
- - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
290
+ - _(Not finished)_ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard for creating+running promptbooks in single line
302
291
  - **[@promptbook/javascript](https://www.npmjs.com/package/@promptbook/javascript)** - Execution tools for javascript inside promptbooks
303
292
  - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
304
293
  - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
@@ -342,9 +331,7 @@ The following glossary is used to clarify certain concepts:
342
331
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
343
332
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
344
333
 
345
- _Note: This section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
346
-
347
-
334
+ _Note: This section is not a complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
348
335
 
349
336
  ### 💯 Core concepts
350
337
 
package/esm/index.es.js CHANGED
@@ -30,7 +30,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
30
30
  * @generated
31
31
  * @see https://github.com/webgptorg/promptbook
32
32
  */
33
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-7';
33
+ const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
34
34
  /**
35
35
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
36
36
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -196,11 +196,11 @@ const DEFAULT_SCRAPE_CACHE_DIRNAME = './.promptbook/scrape-cache';
196
196
  /*
197
197
  TODO: [🌃]
198
198
  /**
199
- * Id of application for the wizzard when using remote server
199
+ * Id of application for the wizard when using remote server
200
200
  *
201
201
  * @public exported from `@promptbook/core`
202
202
  * /
203
- ex-port const WIZZARD_APP_ID: string_app_id = 'wizzard';
203
+ ex-port const WIZARD_APP_ID: string_app_id = 'wizard';
204
204
  */
205
205
  /**
206
206
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
@@ -522,7 +522,7 @@ function checkSerializableAsJson(options) {
522
522
  else {
523
523
  for (const [subName, subValue] of Object.entries(value)) {
524
524
  if (subValue === undefined) {
525
- // Note: undefined in object is serializable - it is just omited
525
+ // Note: undefined in object is serializable - it is just omitted
526
526
  continue;
527
527
  }
528
528
  checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -1131,7 +1131,7 @@ function validatePipeline_InnerFunction(pipeline) {
1131
1131
  * @param fs Filesystem tools
1132
1132
  * @returns Pipelines loaded from the archive
1133
1133
  *
1134
- * @private utility of Prompbook
1134
+ * @private utility of Promptbook
1135
1135
  */
1136
1136
  async function loadArchive(filePath, fs) {
1137
1137
  if (!filePath.endsWith('.bookc')) {
@@ -1153,7 +1153,7 @@ async function loadArchive(filePath, fs) {
1153
1153
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
1154
1154
  */
1155
1155
 
1156
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1156
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1157
1157
 
1158
1158
  /**
1159
1159
  * Checks if value is valid email
@@ -1295,7 +1295,7 @@ function prettifyMarkdown(content) {
1295
1295
  });
1296
1296
  }
1297
1297
  catch (error) {
1298
- // TODO: [🟥] Detect browser / node and make it colorfull
1298
+ // TODO: [🟥] Detect browser / node and make it colorful
1299
1299
  console.error('There was an error with prettifying the markdown, using the original as the fallback', {
1300
1300
  error,
1301
1301
  html: content,
@@ -1586,7 +1586,7 @@ class SimplePipelineCollection {
1586
1586
 
1587
1587
  Note: You have probably forgotten to run "ptbk make" to update the collection
1588
1588
  Note: Pipelines with the same URL are not allowed
1589
- Only exepction is when the pipelines are identical
1589
+ Only exception is when the pipelines are identical
1590
1590
 
1591
1591
  `));
1592
1592
  }
@@ -3075,13 +3075,13 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3075
3075
 
3076
3076
  Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
3077
3077
  `);
3078
- // TODO: [🟥] Detect browser / node and make it colorfull
3078
+ // TODO: [🟥] Detect browser / node and make it colorful
3079
3079
  console.warn(warningMessage);
3080
3080
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
3081
3081
  /*
3082
3082
  return {
3083
3083
  async listModels() {
3084
- // TODO: [🟥] Detect browser / node and make it colorfull
3084
+ // TODO: [🟥] Detect browser / node and make it colorful
3085
3085
  console.warn(
3086
3086
  spaceTrim(
3087
3087
  (block) => `
@@ -4266,10 +4266,10 @@ function knowledgePiecesToString(knowledgePieces) {
4266
4266
  */
4267
4267
  async function getKnowledgeForTask(options) {
4268
4268
  const { tools, preparedPipeline, task, parameters } = options;
4269
- const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
4270
- const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
4269
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
4270
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
4271
4271
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
4272
- if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
4272
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
4273
4273
  return ''; // <- Note: Np knowledge present, return empty string
4274
4274
  }
4275
4275
  try {
@@ -4280,7 +4280,7 @@ async function getKnowledgeForTask(options) {
4280
4280
  title: 'Knowledge Search',
4281
4281
  modelRequirements: {
4282
4282
  modelVariant: 'EMBEDDING',
4283
- modelName: firstKnowlegeIndex.modelName,
4283
+ modelName: firstKnowledgeIndex.modelName,
4284
4284
  },
4285
4285
  content: task.content,
4286
4286
  parameters,
@@ -4288,7 +4288,7 @@ async function getKnowledgeForTask(options) {
4288
4288
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
4289
4289
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
4290
4290
  const { index } = knowledgePiece;
4291
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
4291
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
4292
4292
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
4293
4293
  if (knowledgePieceIndex === undefined) {
4294
4294
  return {
@@ -4309,8 +4309,8 @@ async function getKnowledgeForTask(options) {
4309
4309
  task,
4310
4310
  taskEmbeddingPrompt,
4311
4311
  taskEmbeddingResult,
4312
- firstKnowlegePiece,
4313
- firstKnowlegeIndex,
4312
+ firstKnowledgePiece,
4313
+ firstKnowledgeIndex,
4314
4314
  knowledgePiecesWithRelevance,
4315
4315
  knowledgePiecesSorted,
4316
4316
  knowledgePiecesLimited,
@@ -4379,7 +4379,7 @@ async function getReservedParametersForTask(options) {
4379
4379
  * @private internal utility of `createPipelineExecutor`
4380
4380
  */
4381
4381
  async function executeTask(options) {
4382
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
4382
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
4383
4383
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
4384
4384
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
4385
4385
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -4467,7 +4467,7 @@ async function executeTask(options) {
4467
4467
  cacheDirname,
4468
4468
  intermediateFilesStrategy,
4469
4469
  isAutoInstalled,
4470
- isNotPreparedWarningSupressed,
4470
+ isNotPreparedWarningSuppressed,
4471
4471
  });
4472
4472
  await onProgress({
4473
4473
  outputParameters: {
@@ -4562,7 +4562,7 @@ async function executePipeline(options) {
4562
4562
  }
4563
4563
  return exportJson({
4564
4564
  name: `executionReport`,
4565
- message: `Unuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
4565
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
4566
4566
  order: [],
4567
4567
  value: {
4568
4568
  isSuccessful: false,
@@ -4599,7 +4599,7 @@ async function executePipeline(options) {
4599
4599
  return exportJson({
4600
4600
  name: 'pipelineExecutorResult',
4601
4601
  message: spaceTrim$1((block) => `
4602
- Unuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
4602
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
4603
4603
 
4604
4604
  ${block(pipelineIdentification)}
4605
4605
  `),
@@ -4740,7 +4740,7 @@ async function executePipeline(options) {
4740
4740
  }
4741
4741
  return exportJson({
4742
4742
  name: 'pipelineExecutorResult',
4743
- message: `Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
4743
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
4744
4744
  order: [],
4745
4745
  value: {
4746
4746
  isSuccessful: false,
@@ -4791,7 +4791,7 @@ async function executePipeline(options) {
4791
4791
  * @public exported from `@promptbook/core`
4792
4792
  */
4793
4793
  function createPipelineExecutor(options) {
4794
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSupressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
4794
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
4795
4795
  validatePipeline(pipeline);
4796
4796
  const pipelineIdentification = (() => {
4797
4797
  // Note: This is a 😐 implementation of [🚞]
@@ -4808,7 +4808,7 @@ function createPipelineExecutor(options) {
4808
4808
  if (isPipelinePrepared(pipeline)) {
4809
4809
  preparedPipeline = pipeline;
4810
4810
  }
4811
- else if (isNotPreparedWarningSupressed !== true) {
4811
+ else if (isNotPreparedWarningSuppressed !== true) {
4812
4812
  console.warn(spaceTrim$1((block) => `
4813
4813
  Pipeline is not prepared
4814
4814
 
@@ -4841,7 +4841,7 @@ function createPipelineExecutor(options) {
4841
4841
  maxParallelCount,
4842
4842
  csvSettings,
4843
4843
  isVerbose,
4844
- isNotPreparedWarningSupressed,
4844
+ isNotPreparedWarningSuppressed,
4845
4845
  rootDirname,
4846
4846
  cacheDirname,
4847
4847
  intermediateFilesStrategy,
@@ -4850,7 +4850,7 @@ function createPipelineExecutor(options) {
4850
4850
  assertsError(error);
4851
4851
  return exportJson({
4852
4852
  name: 'pipelineExecutorResult',
4853
- message: `Unuccessful PipelineExecutorResult, last catch`,
4853
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
4854
4854
  order: [],
4855
4855
  value: {
4856
4856
  isSuccessful: false,
@@ -4920,12 +4920,12 @@ function countUsage(llmTools) {
4920
4920
  get title() {
4921
4921
  return `${llmTools.title} (+usage)`;
4922
4922
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
4923
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
4923
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
4924
4924
  },
4925
4925
  get description() {
4926
4926
  return `${llmTools.description} (+usage)`;
4927
4927
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
4928
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
4928
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
4929
4929
  },
4930
4930
  checkConfiguration() {
4931
4931
  return /* not await */ llmTools.checkConfiguration();
@@ -5212,17 +5212,17 @@ function $registeredScrapersMessage(availableScrapers) {
5212
5212
  * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
5213
5213
  */
5214
5214
  const all = [];
5215
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersMetadataRegister.list()) {
5215
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
5216
5216
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
5217
5217
  continue;
5218
5218
  }
5219
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
5219
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
5220
5220
  }
5221
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersRegister.list()) {
5221
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
5222
5222
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
5223
5223
  continue;
5224
5224
  }
5225
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
5225
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
5226
5226
  }
5227
5227
  for (const { metadata } of availableScrapers) {
5228
5228
  all.push(metadata);
@@ -5234,8 +5234,8 @@ function $registeredScrapersMessage(availableScrapers) {
5234
5234
  const isInstalled = $scrapersRegister
5235
5235
  .list()
5236
5236
  .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
5237
- const isAvilableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
5238
- return { ...metadata, isMetadataAviailable, isInstalled, isAvilableInTools };
5237
+ const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
5238
+ return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
5239
5239
  });
5240
5240
  if (metadata.length === 0) {
5241
5241
  return spaceTrim(`
@@ -5248,7 +5248,7 @@ function $registeredScrapersMessage(availableScrapers) {
5248
5248
  return spaceTrim((block) => `
5249
5249
  Available scrapers are:
5250
5250
  ${block(metadata
5251
- .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvilableInBrowser, isAvilableInTools, }, i) => {
5251
+ .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
5252
5252
  const more = [];
5253
5253
  // TODO: [🧠] Maybe use `documentationUrl`
5254
5254
  if (isMetadataAviailable) {
@@ -5257,16 +5257,16 @@ function $registeredScrapersMessage(availableScrapers) {
5257
5257
  if (isInstalled) {
5258
5258
  more.push(`🟩 Installed`);
5259
5259
  } // not else
5260
- if (isAvilableInTools) {
5260
+ if (isAvailableInTools) {
5261
5261
  more.push(`🟦 Available in tools`);
5262
5262
  } // not else
5263
5263
  if (!isMetadataAviailable && isInstalled) {
5264
5264
  more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
5265
5265
  } // not else
5266
- if (!isInstalled && isAvilableInTools) {
5266
+ if (!isInstalled && isAvailableInTools) {
5267
5267
  more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
5268
5268
  } // not else
5269
- if (!isAvilableInBrowser) {
5269
+ if (!isAvailableInBrowser) {
5270
5270
  more.push(`Not usable in browser`);
5271
5271
  }
5272
5272
  const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -5732,7 +5732,7 @@ TODO: [🧊] This is how it can look in future
5732
5732
  /**
5733
5733
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
5734
5734
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
5735
- * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
5735
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
5736
5736
  * TODO: [🧠][❎] Do here proper M:N mapping
5737
5737
  * [x] One source can make multiple pieces
5738
5738
  * [ ] One piece can have multiple sources
@@ -6620,7 +6620,7 @@ const expectCommandParser = {
6620
6620
  $taskJson.expectations[unit] = $taskJson.expectations[unit] || {};
6621
6621
  if (command.sign === 'MINIMUM' || command.sign === 'EXACTLY') {
6622
6622
  if ($taskJson.expectations[unit].min !== undefined) {
6623
- throw new ParseError(`Already defined minumum ${$taskJson.expectations[unit].min} ${command.unit.toLowerCase()}, now trying to redefine it to ${command.amount}`);
6623
+ throw new ParseError(`Already defined minimum ${$taskJson.expectations[unit].min} ${command.unit.toLowerCase()}, now trying to redefine it to ${command.amount}`);
6624
6624
  }
6625
6625
  $taskJson.expectations[unit].min = command.amount;
6626
6626
  } /* not else */
@@ -10873,7 +10873,7 @@ async function listAllFiles(path, isRecursive, fs) {
10873
10873
  return fileNames;
10874
10874
  }
10875
10875
  /**
10876
- * TODO: [😶] Unite floder listing
10876
+ * TODO: [😶] Unite folder listing
10877
10877
  * Note: Not [~🟢~] because it is not directly dependent on `fs
10878
10878
  * TODO: [🖇] What about symlinks?
10879
10879
  */
@@ -11020,7 +11020,7 @@ async function createCollectionFromDirectory(rootPath, tools, options) {
11020
11020
  if (isCrashedOnError) {
11021
11021
  throw new CollectionError(wrappedErrorMessage);
11022
11022
  }
11023
- // TODO: [🟥] Detect browser / node and make it colorfull
11023
+ // TODO: [🟥] Detect browser / node and make it colorful
11024
11024
  console.error(wrappedErrorMessage);
11025
11025
  }
11026
11026
  }
@@ -11087,7 +11087,7 @@ async function createCollectionFromDirectory(rootPath, tools, options) {
11087
11087
 
11088
11088
  Note: You have probably forgotten to run "ptbk make" to update the collection
11089
11089
  Note: Pipelines with the same URL are not allowed
11090
- Only exepction is when the pipelines are identical
11090
+ Only exception is when the pipelines are identical
11091
11091
 
11092
11092
  `));
11093
11093
  }
@@ -11111,7 +11111,7 @@ async function createCollectionFromDirectory(rootPath, tools, options) {
11111
11111
  if (isCrashedOnError) {
11112
11112
  throw new CollectionError(wrappedErrorMessage);
11113
11113
  }
11114
- // TODO: [🟥] Detect browser / node and make it colorfull
11114
+ // TODO: [🟥] Detect browser / node and make it colorful
11115
11115
  console.error(wrappedErrorMessage);
11116
11116
  }
11117
11117
  }