@promptbook/pdf 0.81.0-19 → 0.81.0-22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/README.md +39 -3
  2. package/esm/index.es.js +67 -16
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +38 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  6. package/esm/typings/src/config.d.ts +1 -1
  7. package/esm/typings/src/conversion/compilePipeline.d.ts +1 -4
  8. package/esm/typings/src/conversion/{precompilePipeline.d.ts → parsePipeline.d.ts} +2 -2
  9. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +1 -1
  10. package/esm/typings/src/high-level-abstractions/index.d.ts +1 -1
  11. package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
  12. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +2 -0
  13. package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
  14. package/esm/typings/src/scrapers/_common/Converter.d.ts +1 -0
  15. package/esm/typings/src/scrapers/_common/Scraper.d.ts +1 -1
  16. package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +3 -0
  17. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +2 -0
  18. package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +1 -0
  19. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  20. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -1
  21. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  22. package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -1
  23. package/esm/typings/src/utils/markdown/flattenMarkdown.d.ts +1 -1
  24. package/esm/typings/src/utils/organization/$sideEffect.d.ts +9 -0
  25. package/esm/typings/src/wizzard/wizzard.d.ts +23 -11
  26. package/package.json +2 -2
  27. package/umd/index.umd.js +67 -16
  28. package/umd/index.umd.js.map +1 -1
  29. /package/esm/typings/src/conversion/{precompilePipeline.test.d.ts → parsePipeline.test.d.ts} +0 -0
package/README.md CHANGED
@@ -128,11 +128,38 @@ Promptbook project is ecosystem of multiple projects and tools, following is a l
  </tbody>
  </table>
 
- Also we have a community of developers and users:
+ We also have a community of developers and users of **Promptbook**:
 
- - [Discord](https://discord.gg/x3QWNaa89N)
- - [Landing page](https://ptbk.io)
+ - [Discord community](https://discord.gg/x3QWNaa89N)
+ - [Landing page `ptbk.io`](https://ptbk.io)
  - [Github discussions](https://github.com/webgptorg/promptbook/discussions)
+ - [LinkedIn `Promptbook`](https://linkedin.com/company/promptbook)
+ - [Facebook `Promptbook`](https://www.facebook.com/61560776453536)
+
+ And **Promptbook.studio** branded socials:
+
+
+
+ - [Instagram `@promptbook.studio`](https://www.instagram.com/promptbook.studio/)
+
+
+
+ And **Promptujeme** sub-brand:
+
+ */Subbrand for Czech clients/*
+
+
+
+ - [Promptujeme.cz](https://www.promptujeme.cz/)
+ - [Facebook `Promptujeme`](https://www.facebook.com/promptujeme/)
+
+
+ And **Promptbook.city** branded socials:
+
+ */Sub-brand for images and graphics generated via Promptbook prompting/*
+
+ - [Instagram `@promptbook.city`](https://www.instagram.com/promptbook.city/)
+ - [Facebook `Promptbook City`](https://www.facebook.com/61565718625569)
 
 
 
@@ -266,6 +293,11 @@ Or you can install them separately:
 
  ## 📚 Dictionary
 
+
+
+
+
+
  ### 📚 Dictionary
 
  The following glossary is used to clarify certain concepts:
@@ -281,6 +313,8 @@ The following glossary is used to clarify certain concepts:
  - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
  - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
+
+
  _Note: Thos section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
  #### Promptbook core
@@ -341,6 +375,8 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
  - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
  - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
 
+
+
  ### Terms specific to Promptbook TypeScript implementation
 
  - Anonymous mode
package/esm/index.es.js CHANGED
@@ -22,7 +22,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-18';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-21';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -176,7 +176,7 @@ var NotYetImplementedError = /** @class */ (function (_super) {
  function TODO_USE() {
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
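The readable change buried in this replaced `PipelineCollection` line: the knowledge-title pipeline is retitled "Prepare Knowledge-piece Title" (with a tightened "ideally 2 words, maximum 5 words" rule), the persona-preparation pipeline gets its proper "Prepare Persona" title, and a new "Prepare Title" pipeline (`prepare-title.book.md`) is appended to the collection. Unescaped from the JSON string above, its book source reads:

````markdown
# Prepare Title

- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`
- INPUT PARAMETER `{book}` The book to prepare the title for
- OUTPUT PARAMETER `{title}` Best title for the book

## Make title

- EXPECT MIN 1 Word
- EXPECT MAX 8 Words
- EXPECT EXACTLY 1 Line

```markdown
Make best title for given text which describes the task:

> {book}

## Rules

- Write just title, nothing else
- Title should be concise and clear
- Title starts with emoticon
```

`-> {title}`
````

This is the pipeline that `preparePipeline` now looks up via `getPipelineByUrl('…/prepare-title.book.md')` (see the hunk further down) when a book has no real title yet.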
 
  /**
  * Prettify the html code
@@ -468,6 +468,12 @@ var ADMIN_EMAIL = 'me@pavolhejny.com';
  * @public exported from `@promptbook/core`
  */
  var ADMIN_GITHUB_NAME = 'hejny';
+ /**
+ * When the title is not provided, the default title is used
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -1814,11 +1820,16 @@ function assertsExecutionSuccessful(executionResult) {
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
+ if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+ return false;
+ }
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
  return false;
  }
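In practical terms, the new check means a pipeline whose title is missing, empty, or still the `DEFAULT_BOOK_TITLE` placeholder ("✨ Untitled Book") is no longer considered prepared. A hedged usage sketch (the pipeline value is hypothetical; only `isPipelinePrepared` and the placeholder title come from this diff):

```ts
import { isPipelinePrepared } from '@promptbook/core';

// Hypothetical, otherwise fully prepared pipeline (personas, knowledge and tasks prepared):
declare const preparedPipeline: Parameters<typeof isPipelinePrepared>[0];

console.log(isPipelinePrepared(preparedPipeline));
// -> true

console.log(isPipelinePrepared({ ...preparedPipeline, title: '✨ Untitled Book' }));
// -> false; `preparePipeline` will now generate a real title for it first
```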
@@ -3557,7 +3568,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
  partialPieces = __spreadArray([], __read(partialPiecesUnchecked), false);
  return [2 /*return*/, "break"];
  }
- console.warn(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n \n The source:\n > ").concat(block(knowledgeSource.sourceContent
+ console.warn(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n\n The source:\n ").concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3595,7 +3606,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
  return [7 /*endfinally*/];
  case 9:
  if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge\n \n The source:\n > ".concat(block(knowledgeSource.sourceContent
+ throw new KnowledgeScrapeError(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge\n\n The source:\n > ".concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n No scraper found for the mime type \"").concat(sourceHandler.mimeType, "\"\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3696,6 +3707,8 @@ function prepareTasks(pipeline, tools, options) {
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -3708,16 +3721,17 @@ function preparePipeline(pipeline, tools, options) {
  <- TODO: [🧠][🪑] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, sources, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, title, collection, prepareTitleExecutor, _c, result, outputParameters, titleRaw, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ var _d;
  var _this = this;
- return __generator(this, function (_c) {
- switch (_c.label) {
+ return __generator(this, function (_e) {
+ switch (_e.label) {
  case 0:
  if (isPipelinePrepared(pipeline)) {
  return [2 /*return*/, pipeline];
  }
  rootDirname = options.rootDirname, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? DEFAULT_IS_VERBOSE : _b;
- parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas, sources = pipeline.sources;
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
@@ -3735,6 +3749,40 @@ function preparePipeline(pipeline, tools, options) {
  // <- TODO: [🧊]
  currentPreparation,
  ];
+ title = pipeline.title;
+ if (!(title === undefined || title === '' || title === DEFAULT_BOOK_TITLE)) return [3 /*break*/, 3];
+ collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
+ _c = createPipelineExecutor;
+ _d = {};
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+ case 1:
+ prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
+ _d.tools = tools,
+ _d)]);
+ return [4 /*yield*/, prepareTitleExecutor({
+ book: sources
+ .map(function (_a) {
+ var content = _a.content;
+ return content;
+ })
+ .join('\n\n')
+ // TODO: !!!!!!! Parameters in parameters - DO NOT ALLOW, ESCAPE:
+ .split('{')
+ .join('[')
+ .split('}')
+ .join(']'),
+ })];
+ case 2:
+ result = _e.sent();
+ assertsExecutionSuccessful(result);
+ outputParameters = result.outputParameters;
+ titleRaw = outputParameters.title;
+ if (isVerbose) {
+ console.info("The title is \"".concat(titleRaw, "\""));
+ }
+ title = titleRaw || DEFAULT_BOOK_TITLE;
+ _e.label = 3;
+ case 3:
  preparedPersonas = new Array(personas.length);
  return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
  var modelRequirements, preparedPersona;
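The added `case 1`–`case 3` states are downlevel-compiled generator output and hard to read in that form. As a rough de-transpiled sketch of the new title-preparation step (reconstructed from this diff; the shipped TypeScript source may differ in detail):

```ts
// Inside `preparePipeline(pipeline, tools, options)`; `createCollectionFromJson`,
// `createPipelineExecutor`, `assertsExecutionSuccessful`, `PipelineCollection` and
// `DEFAULT_BOOK_TITLE` all appear elsewhere in this bundle.
let title = pipeline.title;
if (title === undefined || title === '' || title === DEFAULT_BOOK_TITLE) {
    // Look up the new embedded "Prepare Title" pipeline and run it over the book sources
    const collection = createCollectionFromJson(...PipelineCollection);
    const prepareTitleExecutor = createPipelineExecutor({
        pipeline: await collection.getPipelineByUrl(
            'https://promptbook.studio/promptbook/prepare-title.book.md',
        ),
        tools,
    });
    const result = await prepareTitleExecutor({
        // Curly brackets are swapped for square brackets so that book sources are not
        // interpreted as `{parameters}` (see the TODO about escaping in the diff):
        book: sources
            .map(({ content }) => content)
            .join('\n\n')
            .split('{').join('[')
            .split('}').join(']'),
    });
    assertsExecutionSuccessful(result);
    const { title: titleRaw } = result.outputParameters;
    if (isVerbose) {
        console.info(`The title is "${titleRaw}"`);
    }
    title = titleRaw || DEFAULT_BOOK_TITLE;
}
// `title` is later written into the prepared pipeline JSON
// (see the `title: title, knowledgeSources: …` hunk further down).
```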
@@ -3753,12 +3801,12 @@ function preparePipeline(pipeline, tools, options) {
  }
  });
  }); })];
- case 1:
- _c.sent();
+ case 4:
+ _e.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
- case 2:
- partialknowledgePiecesPrepared = _c.sent();
+ case 5:
+ partialknowledgePiecesPrepared = _e.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTasks({
  parameters: parameters,
@@ -3769,8 +3817,8 @@ function preparePipeline(pipeline, tools, options) {
  maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
  isVerbose: isVerbose,
  })];
- case 3:
- tasksPrepared = (_c.sent()).tasksPrepared;
+ case 6:
+ tasksPrepared = (_e.sent()).tasksPrepared;
  // ----- /Tasks preparation -----
  // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
  // Note: Count total usage
@@ -3781,7 +3829,7 @@ function preparePipeline(pipeline, tools, options) {
  order: ORDER_OF_PIPELINE_JSON,
  value: __assign(__assign({}, pipeline), {
  // <- TODO: Probbably deeply clone the pipeline because `$exportJson` freezes the subobjects
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
+ title: title, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
  // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
  personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
  })];
@@ -5876,6 +5924,7 @@ var markdownScraperMetadata = $deepFreeze({
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: true,
+ // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
@@ -6073,7 +6122,8 @@ var pdfScraperMetadata = $deepFreeze({
  className: 'PdfScraper',
  mimeTypes: ['application/pdf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
- isAvilableInBrowser: true,
+ isAvilableInBrowser: false,
+ // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
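`PdfScraper` is no longer flagged as browser-available. A hedged sketch of how a consumer might honour this flag when assembling scrapers for a browser build (the helper and the `ScraperLike` type are illustrative; only the `isAvilableInBrowser` metadata property comes from the package):

```ts
// Illustrative only, not part of the package API.
type ScraperLike = {
    metadata: { className: string; isAvilableInBrowser: boolean };
};

function pickBrowserScrapers(scrapers: ReadonlyArray<ScraperLike>): Array<ScraperLike> {
    // After this change `PdfScraper` (isAvilableInBrowser: false) is filtered out here;
    // PDFs and other non-markdown sources have to be scraped via a remote server instead.
    return scrapers.filter((scraper) => scraper.metadata.isAvilableInBrowser);
}
```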
@@ -6147,6 +6197,7 @@ var PdfScraper = /** @class */ (function () {
  * TODO: [👣] Converted pdf documents can act as cached items - there is no need to run conversion each time
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
  /**