@promptbook/legacy-documents 0.94.0-7 → 0.95.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
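
For reference, a comparable diff can be generated locally with the npm CLI (the command below is an illustration for these two versions; its output formatting will differ from this page):

```
# Compare the two published versions of the package directly from the registry
npm diff --diff=@promptbook/legacy-documents@0.94.0-7 --diff=@promptbook/legacy-documents@0.95.0
```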
Files changed (61)
  1. package/README.md +8 -21
  2. package/esm/index.es.js +45 -45
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/types.index.d.ts +2 -2
  5. package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
  6. package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
  7. package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
  8. package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
  10. package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
  11. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  12. package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
  13. package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
  14. package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
  15. package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
  21. package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
  28. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
  30. package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
  31. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
  32. package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
  33. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
  34. package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
  35. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
  36. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
  37. package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
  38. package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
  39. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
  40. package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
  41. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
  42. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
  43. package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
  44. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
  45. package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
  46. package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
  47. package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
  48. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  49. package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
  50. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
  51. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  52. package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
  53. package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
  54. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  55. package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
  56. package/esm/typings/src/version.d.ts +1 -1
  57. package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
  58. package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
  59. package/package.json +25 -14
  60. package/umd/index.umd.js +45 -45
  61. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -25,10 +25,6 @@ Write AI applications using plain human language across multiple models and plat
 
 
 
- <blockquote style="color: #ff8811">
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
- </blockquote>
-
  ## 📦 Package `@promptbook/legacy-documents`
 
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -58,17 +54,15 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
 
  ## 🤍 The Book Abstract
 
- **It's time for a paradigm shift! The future of software is in plain English, French or Latin.**
+ **It's time for a paradigm shift! The future of software is written in plain English, French, or Latin.**
 
  During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
 
-
-
  It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
 
  The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
 
- This shift is going to happen, whether we are ready for it or not. Our mission is to make it excellently, not just good.
+ This shift will happen whether we're ready or not. Our mission is to make it excellent, not just good.
 
  **Join us in this journey!**
 
@@ -189,9 +183,6 @@ Join our growing community of developers and users:
 
  _A concise, Markdown-based DSL for crafting AI workflows and automations._
 
-
-
-
  ### Introduction
 
  Book is a Markdown-based language that simplifies the creation of AI applications, workflows, and automations. With human-readable commands, you can define inputs, outputs, personas, knowledge sources, and actions—without needing model-specific details.
@@ -219,7 +210,7 @@ Book is a Markdown-based language that simplifies the creation of AI application
  → {article}
  ```
 
- Each part of the book defines one of 3 circles:
+ Each part of the book defines one of three circles:
 
  ### **1. What:** Workflows, Tasks and Parameters
 
@@ -241,8 +232,6 @@ Personas can have access to different knowledge, tools and actions. They can als
 
  - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
 
-
-
  ### **3. How:** Knowledge, Instruments and Actions
 
  The resources used by the personas are used to do the work.
@@ -257,9 +246,9 @@ The resources used by the personas are used to do the work.
 
  Book language is based on markdown. It is subset of markdown. It is designed to be easy to read and write. It is designed to be understandable by both humans and machines and without specific knowledge of the language.
 
- The file has `.book` extension. It uses `UTF-8` non BOM encoding.
+ The file has a `.book` extension and uses UTF-8 encoding without BOM.
 
- Book has two variants: flat - which is just a prompt with no structure, and full - which has a structure with tasks, commands and prompts.
+ Books have two variants: flat just a prompt without structure, and full with tasks, commands, and prompts.
 
  As it is source code, it can leverage all the features of version control systems like git and does not suffer from the problems of binary formats, proprietary formats, or no-code solutions.
 
@@ -290,13 +279,13 @@ Or you can install them separately:
 
  - ⭐ **[ptbk](https://www.npmjs.com/package/ptbk)** - Bundle of all packages, when you want to install everything and you don't care about the size
  - **[promptbook](https://www.npmjs.com/package/promptbook)** - Same as `ptbk`
- - ⭐🧙‍♂️ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizzard to just run the books in node without any struggle
+ - ⭐🧙‍♂️ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard to just run the books in node without any struggle
  - **[@promptbook/core](https://www.npmjs.com/package/@promptbook/core)** - Core of the library, it contains the main logic for promptbooks
  - **[@promptbook/node](https://www.npmjs.com/package/@promptbook/node)** - Core of the library for Node.js environment
  - **[@promptbook/browser](https://www.npmjs.com/package/@promptbook/browser)** - Core of the library for browser environment
  - ⭐ **[@promptbook/utils](https://www.npmjs.com/package/@promptbook/utils)** - Utility functions used in the library but also useful for individual use in preprocessing and postprocessing LLM inputs and outputs
  - **[@promptbook/markdown-utils](https://www.npmjs.com/package/@promptbook/markdown-utils)** - Utility functions used for processing markdown
- - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
+ - _(Not finished)_ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard for creating+running promptbooks in single line
  - **[@promptbook/javascript](https://www.npmjs.com/package/@promptbook/javascript)** - Execution tools for javascript inside promptbooks
  - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
  - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
@@ -340,9 +329,7 @@ The following glossary is used to clarify certain concepts:
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
- _Note: This section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
-
-
+ _Note: This section is not a complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
 
  ### 💯 Core concepts
 
package/esm/index.es.js CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0-7';
+ const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1054,7 +1054,7 @@ async function getScraperIntermediateSource(source, options) {
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
  /**
  * Checks if value is valid email
@@ -1211,7 +1211,7 @@ function prettifyMarkdown(content) {
  });
  }
  catch (error) {
- // TODO: [🟥] Detect browser / node and make it colorfull
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error('There was an error with prettifying the markdown, using the original as the fallback', {
  error,
  html: content,
@@ -1493,7 +1493,7 @@ function checkSerializableAsJson(options) {
  else {
  for (const [subName, subValue] of Object.entries(value)) {
  if (subValue === undefined) {
- // Note: undefined in object is serializable - it is just omited
+ // Note: undefined in object is serializable - it is just omitted
  continue;
  }
  checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -2183,7 +2183,7 @@ class SimplePipelineCollection {
 
  Note: You have probably forgotten to run "ptbk make" to update the collection
  Note: Pipelines with the same URL are not allowed
- Only exepction is when the pipelines are identical
+ Only exception is when the pipelines are identical
 
  `));
  }
@@ -2933,12 +2933,12 @@ function countUsage(llmTools) {
  get title() {
  return `${llmTools.title} (+usage)`;
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
  },
  get description() {
  return `${llmTools.description} (+usage)`;
  // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
- // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
  },
  checkConfiguration() {
  return /* not await */ llmTools.checkConfiguration();
@@ -3165,13 +3165,13 @@ function joinLlmExecutionTools(...llmExecutionTools) {
 
  Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
  `);
- // TODO: [🟥] Detect browser / node and make it colorfull
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.warn(warningMessage);
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  /*
  return {
  async listModels() {
- // TODO: [🟥] Detect browser / node and make it colorfull
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.warn(
  spaceTrim(
  (block) => `
@@ -3447,17 +3447,17 @@ function $registeredScrapersMessage(availableScrapers) {
  * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
  */
  const all = [];
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersMetadataRegister.list()) {
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
  continue;
  }
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
  }
- for (const { packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser, } of $scrapersRegister.list()) {
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
  if (all.some((item) => item.packageName === packageName && item.className === className)) {
  continue;
  }
- all.push({ packageName, className, mimeTypes, documentationUrl, isAvilableInBrowser });
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
  }
  for (const { metadata } of availableScrapers) {
  all.push(metadata);
@@ -3469,8 +3469,8 @@ function $registeredScrapersMessage(availableScrapers) {
  const isInstalled = $scrapersRegister
  .list()
  .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
- const isAvilableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
- return { ...metadata, isMetadataAviailable, isInstalled, isAvilableInTools };
+ const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+ return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
  });
  if (metadata.length === 0) {
  return spaceTrim$1(`
@@ -3483,7 +3483,7 @@ function $registeredScrapersMessage(availableScrapers) {
  return spaceTrim$1((block) => `
  Available scrapers are:
  ${block(metadata
- .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvilableInBrowser, isAvilableInTools, }, i) => {
+ .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
  const more = [];
  // TODO: [🧠] Maybe use `documentationUrl`
  if (isMetadataAviailable) {
@@ -3492,16 +3492,16 @@ function $registeredScrapersMessage(availableScrapers) {
  if (isInstalled) {
  more.push(`🟩 Installed`);
  } // not else
- if (isAvilableInTools) {
+ if (isAvailableInTools) {
  more.push(`🟦 Available in tools`);
  } // not else
  if (!isMetadataAviailable && isInstalled) {
  more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
  } // not else
- if (!isInstalled && isAvilableInTools) {
+ if (!isInstalled && isAvailableInTools) {
  more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
  } // not else
- if (!isAvilableInBrowser) {
+ if (!isAvailableInBrowser) {
  more.push(`Not usable in browser`);
  }
  const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -3831,7 +3831,7 @@ TODO: [🧊] This is how it can look in future
  /**
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
- * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
  * TODO: [🧠][❎] Do here proper M:N mapping
  * [x] One source can make multiple pieces
  * [ ] One piece can have multiple sources
@@ -5503,10 +5503,10 @@ function knowledgePiecesToString(knowledgePieces) {
  */
  async function getKnowledgeForTask(options) {
  const { tools, preparedPipeline, task, parameters } = options;
- const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
- const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
- if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
@@ -5517,7 +5517,7 @@ async function getKnowledgeForTask(options) {
  title: 'Knowledge Search',
  modelRequirements: {
  modelVariant: 'EMBEDDING',
- modelName: firstKnowlegeIndex.modelName,
+ modelName: firstKnowledgeIndex.modelName,
  },
  content: task.content,
  parameters,
@@ -5525,7 +5525,7 @@ async function getKnowledgeForTask(options) {
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
  const { index } = knowledgePiece;
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
  if (knowledgePieceIndex === undefined) {
  return {
@@ -5546,8 +5546,8 @@ async function getKnowledgeForTask(options) {
  task,
  taskEmbeddingPrompt,
  taskEmbeddingResult,
- firstKnowlegePiece,
- firstKnowlegeIndex,
+ firstKnowledgePiece,
+ firstKnowledgeIndex,
  knowledgePiecesWithRelevance,
  knowledgePiecesSorted,
  knowledgePiecesLimited,
@@ -5616,7 +5616,7 @@ async function getReservedParametersForTask(options) {
  * @private internal utility of `createPipelineExecutor`
  */
  async function executeTask(options) {
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSupressed, } = options;
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5704,7 +5704,7 @@ async function executeTask(options) {
  cacheDirname,
  intermediateFilesStrategy,
  isAutoInstalled,
- isNotPreparedWarningSupressed,
+ isNotPreparedWarningSuppressed,
  });
  await onProgress({
  outputParameters: {
@@ -5799,7 +5799,7 @@ async function executePipeline(options) {
  }
  return exportJson({
  name: `executionReport`,
- message: `Unuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -5836,7 +5836,7 @@ async function executePipeline(options) {
  return exportJson({
  name: 'pipelineExecutorResult',
  message: spaceTrim((block) => `
- Unuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
 
  ${block(pipelineIdentification)}
  `),
@@ -5977,7 +5977,7 @@ async function executePipeline(options) {
  }
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -6028,7 +6028,7 @@ async function executePipeline(options) {
  * @public exported from `@promptbook/core`
  */
  function createPipelineExecutor(options) {
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSupressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
  validatePipeline(pipeline);
  const pipelineIdentification = (() => {
  // Note: This is a 😐 implementation of [🚞]
@@ -6045,7 +6045,7 @@ function createPipelineExecutor(options) {
  if (isPipelinePrepared(pipeline)) {
  preparedPipeline = pipeline;
  }
- else if (isNotPreparedWarningSupressed !== true) {
+ else if (isNotPreparedWarningSuppressed !== true) {
  console.warn(spaceTrim((block) => `
  Pipeline is not prepared
 
@@ -6078,7 +6078,7 @@ function createPipelineExecutor(options) {
  maxParallelCount,
  csvSettings,
  isVerbose,
- isNotPreparedWarningSupressed,
+ isNotPreparedWarningSuppressed,
  rootDirname,
  cacheDirname,
  intermediateFilesStrategy,
@@ -6087,7 +6087,7 @@ function createPipelineExecutor(options) {
  assertsError(error);
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `Unuccessful PipelineExecutorResult, last catch`,
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
  order: [],
  value: {
  isSuccessful: false,
@@ -6125,7 +6125,7 @@ const markdownScraperMetadata = $deepFreeze({
  className: 'MarkdownScraper',
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
- isAvilableInBrowser: true,
+ isAvailableInBrowser: true,
  // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
@@ -6135,7 +6135,7 @@ const markdownScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(markdownScraperMetadata);
@@ -6234,7 +6234,7 @@ class MarkdownScraper {
  }
  // ---
  if (!llmTools.callEmbeddingModel) {
- // TODO: [🟥] Detect browser / node and make it colorfull
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error('No callEmbeddingModel function provided');
  }
  else {
@@ -6260,7 +6260,7 @@ class MarkdownScraper {
  if (!(error instanceof PipelineExecutionError)) {
  throw error;
  }
- // TODO: [🟥] Detect browser / node and make it colorfull
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error(error, "<- Note: This error is not critical to prepare the pipeline, just knowledge pieces won't have embeddings");
  }
  return {
@@ -6291,7 +6291,7 @@ const documentScraperMetadata = $deepFreeze({
  className: 'DocumentScraper',
  mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
- isAvilableInBrowser: false,
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: ['Pandoc'],
  }); /* <- Note: [🤛] */
@@ -6301,7 +6301,7 @@ const documentScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(documentScraperMetadata);
@@ -6427,7 +6427,7 @@ const legacyDocumentScraperMetadata = $deepFreeze({
  className: 'LegacyDocumentScraper',
  mimeTypes: ['application/msword', 'text/rtf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
- isAvilableInBrowser: false,
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [
  'Pandoc',
@@ -6441,7 +6441,7 @@ const legacyDocumentScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(legacyDocumentScraperMetadata);
@@ -6602,7 +6602,7 @@ const createLegacyDocumentScraper = Object.assign((tools, options) => {
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/legacy-documents`
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  const _LegacyDocumentScraperRegistration = $scrapersRegister.register(createLegacyDocumentScraper);