@promptbook/markdown-utils 0.81.0-19 → 0.81.0-22

This diff shows the changes between publicly released versions of the package, exactly as they appear in their public registry, and is provided for informational purposes only.
Files changed (29)
  1. package/README.md +39 -3
  2. package/esm/index.es.js +63 -20
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +38 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  6. package/esm/typings/src/config.d.ts +1 -1
  7. package/esm/typings/src/conversion/compilePipeline.d.ts +1 -4
  8. package/esm/typings/src/conversion/{precompilePipeline.d.ts → parsePipeline.d.ts} +2 -2
  9. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +1 -1
  10. package/esm/typings/src/high-level-abstractions/index.d.ts +1 -1
  11. package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
  12. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +2 -0
  13. package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
  14. package/esm/typings/src/scrapers/_common/Converter.d.ts +1 -0
  15. package/esm/typings/src/scrapers/_common/Scraper.d.ts +1 -1
  16. package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +3 -0
  17. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +2 -0
  18. package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +1 -0
  19. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  20. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -1
  21. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  22. package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -1
  23. package/esm/typings/src/utils/markdown/flattenMarkdown.d.ts +1 -1
  24. package/esm/typings/src/utils/organization/$sideEffect.d.ts +9 -0
  25. package/esm/typings/src/wizzard/wizzard.d.ts +23 -11
  26. package/package.json +1 -1
  27. package/umd/index.umd.js +63 -20
  28. package/umd/index.umd.js.map +1 -1
  29. /package/esm/typings/src/conversion/{precompilePipeline.test.d.ts → parsePipeline.test.d.ts} +0 -0
package/README.md CHANGED
@@ -126,11 +126,38 @@ Promptbook project is ecosystem of multiple projects and tools, following is a l
  </tbody>
  </table>
 
- Also we have a community of developers and users:
+ We also have a community of developers and users of **Promptbook**:
 
- - [Discord](https://discord.gg/x3QWNaa89N)
- - [Landing page](https://ptbk.io)
+ - [Discord community](https://discord.gg/x3QWNaa89N)
+ - [Landing page `ptbk.io`](https://ptbk.io)
  - [Github discussions](https://github.com/webgptorg/promptbook/discussions)
+ - [LinkedIn `Promptbook`](https://linkedin.com/company/promptbook)
+ - [Facebook `Promptbook`](https://www.facebook.com/61560776453536)
+
+ And **Promptbook.studio** branded socials:
+
+
+
+ - [Instagram `@promptbook.studio`](https://www.instagram.com/promptbook.studio/)
+
+
+
+ And **Promptujeme** sub-brand:
+
+ */Subbrand for Czech clients/*
+
+
+
+ - [Promptujeme.cz](https://www.promptujeme.cz/)
+ - [Facebook `Promptujeme`](https://www.facebook.com/promptujeme/)
+
+
+ And **Promptbook.city** branded socials:
+
+ */Sub-brand for images and graphics generated via Promptbook prompting/*
+
+ - [Instagram `@promptbook.city`](https://www.instagram.com/promptbook.city/)
+ - [Facebook `Promptbook City`](https://www.facebook.com/61565718625569)
 
 
 
@@ -264,6 +291,11 @@ Or you can install them separately:
 
  ## 📚 Dictionary
 
+
+
+
+
+
  ### 📚 Dictionary
 
  The following glossary is used to clarify certain concepts:
@@ -279,6 +311,8 @@ The following glossary is used to clarify certain concepts:
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
+
+
  _Note: Thos section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
  #### Promptbook core
@@ -339,6 +373,8 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
  - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
  - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
 
+
+
  ### Terms specific to Promptbook TypeScript implementation
 
  - Anonymous mode
package/esm/index.es.js CHANGED
@@ -22,7 +22,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-18';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-21';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -358,7 +358,7 @@ function extractJsonBlock(markdown) {
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
 
  /**
  * Prettify the html code
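
For readability: the minified `PipelineCollection` change above renames the persona-preparation pipeline to "Prepare Persona", renames the knowledge-title pipeline to "Prepare Knowledge-piece Title" (and amends its prompt rules), and appends a new book-title pipeline. A rough sketch of the shape of the appended entry, with field values copied from the bundled JSON above and the prompt content elided:

```ts
// Sketch (abridged) of the new entry appended to PipelineCollection
const prepareTitlePipeline = {
    title: 'Prepare Title',
    pipelineUrl: 'https://promptbook.studio/promptbook/prepare-title.book.md',
    formfactorName: 'GENERIC',
    parameters: [
        { name: 'book', description: 'The book to prepare the title for', isInput: true, isOutput: false },
        { name: 'title', description: 'Best title for the book', isInput: false, isOutput: true },
    ],
    tasks: [
        {
            taskType: 'PROMPT_TASK',
            name: 'make-title',
            title: 'Make title',
            // Prompt content elided; see the full JSON above
            resultingParameterName: 'title',
            expectations: { words: { min: 1, max: 8 }, lines: { min: 1, max: 1 } },
            dependentParameterNames: ['book'],
        },
    ],
    sourceFile: './books/prepare-title.book.md',
};
```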
@@ -652,7 +652,7 @@ var ADMIN_GITHUB_NAME = 'hejny';
  *
  * @public exported from `@promptbook/core`
  */
- var DEFAULT_TITLE = "Untitled";
+ var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -1996,11 +1996,16 @@ function assertsExecutionSuccessful(executionResult) {
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
+ if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+ return false;
+ }
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
  return false;
  }
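
The effect of the added check: a pipeline whose `title` is missing, empty, or still the `DEFAULT_BOOK_TITLE` placeholder is no longer reported as prepared, which is what triggers the new title-generation step in `preparePipeline` below. A minimal TypeScript sketch of just this guard (the `hasPreparedTitle` helper name is illustrative, not part of the package):

```ts
// Minimal sketch of the new title guard in isPipelinePrepared
const DEFAULT_BOOK_TITLE = '\u2728 Untitled Book';

function hasPreparedTitle(pipeline: { title?: string }): boolean {
    // Missing, empty, or placeholder titles mean the pipeline still needs preparation
    return pipeline.title !== undefined && pipeline.title !== '' && pipeline.title !== DEFAULT_BOOK_TITLE;
}
```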
@@ -3739,7 +3744,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
  partialPieces = __spreadArray([], __read(partialPiecesUnchecked), false);
  return [2 /*return*/, "break"];
  }
- console.warn(spaceTrim(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n \n The source:\n > ").concat(block(knowledgeSource.sourceContent
+ console.warn(spaceTrim(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n\n The source:\n ").concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3777,7 +3782,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
  return [7 /*endfinally*/];
  case 9:
  if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim(function (block) { return "\n Cannot scrape knowledge\n \n The source:\n > ".concat(block(knowledgeSource.sourceContent
+ throw new KnowledgeScrapeError(spaceTrim(function (block) { return "\n Cannot scrape knowledge\n\n The source:\n > ".concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n No scraper found for the mime type \"").concat(sourceHandler.mimeType, "\"\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3878,6 +3883,8 @@ function prepareTasks(pipeline, tools, options) {
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -3890,16 +3897,17 @@ function preparePipeline(pipeline, tools, options) {
  <- TODO: [🧠][🪑] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, sources, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, title, collection, prepareTitleExecutor, _c, result, outputParameters, titleRaw, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ var _d;
  var _this = this;
- return __generator(this, function (_c) {
- switch (_c.label) {
+ return __generator(this, function (_e) {
+ switch (_e.label) {
  case 0:
  if (isPipelinePrepared(pipeline)) {
  return [2 /*return*/, pipeline];
  }
  rootDirname = options.rootDirname, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? DEFAULT_IS_VERBOSE : _b;
- parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas, sources = pipeline.sources;
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
@@ -3917,6 +3925,40 @@ function preparePipeline(pipeline, tools, options) {
  // <- TODO: [🧊]
  currentPreparation,
  ];
+ title = pipeline.title;
+ if (!(title === undefined || title === '' || title === DEFAULT_BOOK_TITLE)) return [3 /*break*/, 3];
+ collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
+ _c = createPipelineExecutor;
+ _d = {};
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+ case 1:
+ prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
+ _d.tools = tools,
+ _d)]);
+ return [4 /*yield*/, prepareTitleExecutor({
+ book: sources
+ .map(function (_a) {
+ var content = _a.content;
+ return content;
+ })
+ .join('\n\n')
+ // TODO: !!!!!!! Parameters in parameters - DO NOT ALLOW, ESCAPE:
+ .split('{')
+ .join('[')
+ .split('}')
+ .join(']'),
+ })];
+ case 2:
+ result = _e.sent();
+ assertsExecutionSuccessful(result);
+ outputParameters = result.outputParameters;
+ titleRaw = outputParameters.title;
+ if (isVerbose) {
+ console.info("The title is \"".concat(titleRaw, "\""));
+ }
+ title = titleRaw || DEFAULT_BOOK_TITLE;
+ _e.label = 3;
+ case 3:
  preparedPersonas = new Array(personas.length);
  return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
  var modelRequirements, preparedPersona;
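
The generator `case` labels above are the downleveled form of an async block that generates a title only when the book does not already have a usable one; the resulting `title` is later written into the prepared pipeline JSON (see the `$exportJson` hunk further down). A hand-written approximation of the added step, using the names and call shapes that appear in the bundled code (a reconstruction, not the actual TypeScript source):

```ts
// Approximate untranspiled form of the new title-preparation step in preparePipeline
let title = pipeline.title;
if (title === undefined || title === '' || title === DEFAULT_BOOK_TITLE) {
    const collection = createCollectionFromJson(...PipelineCollection);
    const prepareTitleExecutor = createPipelineExecutor({
        pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md'),
        tools,
    });
    const result = await prepareTitleExecutor({
        // TODO in the source: escape `{` and `}` instead of replacing them with brackets
        book: sources
            .map(({ content }) => content)
            .join('\n\n')
            .split('{')
            .join('[')
            .split('}')
            .join(']'),
    });
    assertsExecutionSuccessful(result);
    const { title: titleRaw } = result.outputParameters;
    if (isVerbose) {
        console.info(`The title is "${titleRaw}"`);
    }
    title = titleRaw || DEFAULT_BOOK_TITLE;
}
```

Note the brace replacement: placeholders like `{book}` inside the concatenated sources are rewritten to `[book]` so they are not re-expanded as parameters; the TODO in the source marks this as a temporary escape.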
@@ -3935,12 +3977,12 @@ function preparePipeline(pipeline, tools, options) {
  }
  });
  }); })];
- case 1:
- _c.sent();
+ case 4:
+ _e.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
- case 2:
- partialknowledgePiecesPrepared = _c.sent();
+ case 5:
+ partialknowledgePiecesPrepared = _e.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTasks({
  parameters: parameters,
@@ -3951,8 +3993,8 @@ function preparePipeline(pipeline, tools, options) {
  maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
  isVerbose: isVerbose,
  })];
- case 3:
- tasksPrepared = (_c.sent()).tasksPrepared;
+ case 6:
+ tasksPrepared = (_e.sent()).tasksPrepared;
  // ----- /Tasks preparation -----
  // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
  // Note: Count total usage
@@ -3963,7 +4005,7 @@ function preparePipeline(pipeline, tools, options) {
  order: ORDER_OF_PIPELINE_JSON,
  value: __assign(__assign({}, pipeline), {
  // <- TODO: Probbably deeply clone the pipeline because `$exportJson` freezes the subobjects
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
+ title: title, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
  // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
  personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
  })];
@@ -5921,6 +5963,7 @@ var markdownScraperMetadata = $deepFreeze({
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: true,
+ // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
@@ -6340,7 +6383,7 @@ function splitMarkdownIntoSections(markdown) {
  return;
  }
  if (!section.startsWith('#')) {
- section = "# ".concat(DEFAULT_TITLE, "\n\n").concat(section);
+ section = "# ".concat(DEFAULT_BOOK_TITLE, "\n\n").concat(section);
  }
  sections.push(section);
  buffer = [];
@@ -6395,7 +6438,7 @@ function splitMarkdownIntoSections(markdown) {
  /**
  * Normalizes the markdown by flattening the structure
  *
- * - It always have h1 - if there is no h1 in the markdown, it will be added "# Untitled"
+ * - It always have h1 - if there is no h1 in the markdown, it will be added `DEFAULT_BOOK_TITLE`
  * - All other headings are normalized to h2
  *
  * @public exported from `@promptbook/markdown-utils`
@@ -6404,7 +6447,7 @@ function flattenMarkdown(markdown) {
  var e_1, _a;
  var sections = splitMarkdownIntoSections(markdown);
  if (sections.length === 0) {
- return "# ".concat(DEFAULT_TITLE);
+ return "# ".concat(DEFAULT_BOOK_TITLE);
  }
  var flattenedMarkdown = '';
  var parsedSections = sections.map(parseMarkdownSection);
@@ -6415,7 +6458,7 @@ function flattenMarkdown(markdown) {
  }
  else {
  parsedSections.unshift(firstSection);
- flattenedMarkdown += "# ".concat(DEFAULT_TITLE) + "\n\n"; // <- [🧠] Maybe 3 new lines?
+ flattenedMarkdown += "# ".concat(DEFAULT_BOOK_TITLE) + "\n\n"; // <- [🧠] Maybe 3 new lines?
  }
  try {
  for (var parsedSections_1 = __values(parsedSections), parsedSections_1_1 = parsedSections_1.next(); !parsedSections_1_1.done; parsedSections_1_1 = parsedSections_1.next()) {
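
Taken together, the markdown utilities now fall back to the renamed `DEFAULT_BOOK_TITLE` ("✨ Untitled Book") instead of "Untitled" wherever a document lacks an h1. A small usage sketch of the exported `flattenMarkdown` under that assumption (outputs inferred from the code above, shown approximately):

```ts
import { flattenMarkdown } from '@promptbook/markdown-utils';

// Empty markdown hits the early return and yields only the placeholder heading
flattenMarkdown(''); // -> '# ✨ Untitled Book'

// Markdown without an h1 gets the placeholder prepended as its h1;
// other headings are normalized to h2 per the jsdoc in the hunk above
flattenMarkdown('Some paragraph');
// -> roughly '# ✨ Untitled Book\n\nSome paragraph'
```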