@promptbook/legacy-documents 0.81.0-19 → 0.81.0-22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -3
- package/esm/index.es.js +66 -15
- package/esm/index.es.js.map +1 -1
- package/esm/typings/books/index.d.ts +38 -0
- package/esm/typings/src/_packages/core.index.d.ts +4 -4
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/conversion/compilePipeline.d.ts +1 -4
- package/esm/typings/src/conversion/{precompilePipeline.d.ts → parsePipeline.d.ts} +2 -2
- package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +1 -1
- package/esm/typings/src/high-level-abstractions/index.d.ts +1 -1
- package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
- package/esm/typings/src/prepare/isPipelinePrepared.d.ts +2 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
- package/esm/typings/src/scrapers/_common/Converter.d.ts +1 -0
- package/esm/typings/src/scrapers/_common/Scraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +3 -0
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +2 -0
- package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +1 -0
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -1
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -1
- package/esm/typings/src/utils/markdown/flattenMarkdown.d.ts +1 -1
- package/esm/typings/src/utils/organization/$sideEffect.d.ts +9 -0
- package/esm/typings/src/wizzard/wizzard.d.ts +23 -11
- package/package.json +2 -2
- package/umd/index.umd.js +66 -15
- package/umd/index.umd.js.map +1 -1
- /package/esm/typings/src/conversion/{precompilePipeline.test.d.ts → parsePipeline.test.d.ts} +0 -0
package/README.md
CHANGED
@@ -128,11 +128,38 @@ Promptbook project is ecosystem of multiple projects and tools, following is a l
 </tbody>
 </table>
 
-
+We also have a community of developers and users of **Promptbook**:
 
-- [Discord](https://discord.gg/x3QWNaa89N)
-- [Landing page](https://ptbk.io)
+- [Discord community](https://discord.gg/x3QWNaa89N)
+- [Landing page `ptbk.io`](https://ptbk.io)
 - [Github discussions](https://github.com/webgptorg/promptbook/discussions)
+- [LinkedIn `Promptbook`](https://linkedin.com/company/promptbook)
+- [Facebook `Promptbook`](https://www.facebook.com/61560776453536)
+
+And **Promptbook.studio** branded socials:
+
+
+
+- [Instagram `@promptbook.studio`](https://www.instagram.com/promptbook.studio/)
+
+
+
+And **Promptujeme** sub-brand:
+
+*/Subbrand for Czech clients/*
+
+
+
+- [Promptujeme.cz](https://www.promptujeme.cz/)
+- [Facebook `Promptujeme`](https://www.facebook.com/promptujeme/)
+
+
+And **Promptbook.city** branded socials:
+
+*/Sub-brand for images and graphics generated via Promptbook prompting/*
+
+- [Instagram `@promptbook.city`](https://www.instagram.com/promptbook.city/)
+- [Facebook `Promptbook City`](https://www.facebook.com/61565718625569)
 
 
 
@@ -266,6 +293,11 @@ Or you can install them separately:
 
 ## 📚 Dictionary
 
+
+
+
+
+
 ### 📚 Dictionary
 
 The following glossary is used to clarify certain concepts:
@@ -281,6 +313,8 @@ The following glossary is used to clarify certain concepts:
 - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
 - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
+
+
 _Note: Thos section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
 #### Promptbook core
@@ -341,6 +375,8 @@ _Note: Thos section is not complete dictionary, more list of general AI / LLM te
 - [👮 Agent adversary expectations](https://github.com/webgptorg/promptbook/discussions/39)
 - [view more](https://github.com/webgptorg/promptbook/discussions/categories/concepts)
 
+
+
 ### Terms specific to Promptbook TypeScript implementation
 
 - Anonymous mode
package/esm/index.es.js
CHANGED
@@ -25,7 +25,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
 * @generated
 * @see https://github.com/webgptorg/promptbook
 */
-var PROMPTBOOK_ENGINE_VERSION = '0.81.0-
+var PROMPTBOOK_ENGINE_VERSION = '0.81.0-21';
 /**
 * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
 * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -190,6 +190,12 @@ var ADMIN_EMAIL = 'me@pavolhejny.com';
 * @public exported from `@promptbook/core`
 */
 var ADMIN_GITHUB_NAME = 'hejny';
+/**
+ * When the title is not provided, the default title is used
+ *
+ * @public exported from `@promptbook/core`
+ */
+var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
 // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
 /**
 * The maximum number of iterations for a loops
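The new `DEFAULT_BOOK_TITLE` constant resolves to "✨ Untitled Book" (`\u2728` is the sparkles emoji) and its JSDoc marks it as `@public exported from `@promptbook/core``. A minimal usage sketch, assuming that export path:

```ts
// Minimal sketch, assuming the export path documented in the JSDoc above.
import { DEFAULT_BOOK_TITLE } from '@promptbook/core';

// Pipelines whose title is still this placeholder are treated as "not prepared"
// (see the `isPipelinePrepared` hunk further below).
console.log(DEFAULT_BOOK_TITLE); // "✨ Untitled Book"
```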
@@ -1108,7 +1114,7 @@ function getScraperIntermediateSource(source, options) {
 * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
 */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
 
 /**
 * Prettify the html code
@@ -2513,11 +2519,16 @@ function assertsExecutionSuccessful(executionResult) {
 /**
 * Determine if the pipeline is fully prepared
 *
+* @see https://github.com/webgptorg/promptbook/discussions/196
+*
 * @public exported from `@promptbook/core`
 */
 function isPipelinePrepared(pipeline) {
     // Note: Ignoring `pipeline.preparations` @@@
     // Note: Ignoring `pipeline.knowledgePieces` @@@
+    if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+        return false;
+    }
     if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
         return false;
     }
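With this change, a pipeline whose title is missing, empty, or still equal to `DEFAULT_BOOK_TITLE` is no longer considered prepared, so `preparePipeline` will run and generate a title for it. A readable sketch of the added guard (it mirrors the compiled JS above; the parameter type is loosened for illustration):

```ts
// Sketch of the new title guard in `isPipelinePrepared`; mirrors the compiled JS above.
const DEFAULT_BOOK_TITLE = '✨ Untitled Book';

function hasPreparedTitle(pipeline: { title?: string }): boolean {
    // A missing, empty, or still-default title forces another preparation pass,
    // which is what generates a real title (see the `preparePipeline` hunks below).
    return !(pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE);
}
```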
@@ -3838,7 +3849,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
 partialPieces = __spreadArray([], __read(partialPiecesUnchecked), false);
 return [2 /*return*/, "break"];
 }
-console.warn(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n
+console.warn(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n\n The source:\n ").concat(block(knowledgeSource.sourceContent
 .split('\n')
 .map(function (line) { return "> ".concat(line); })
 .join('\n')), "\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3876,7 +3887,7 @@ function prepareKnowledgePieces(knowledgeSources, tools, options) {
 return [7 /*endfinally*/];
 case 9:
 if (partialPieces === null) {
-throw new KnowledgeScrapeError(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge\n
+throw new KnowledgeScrapeError(spaceTrim$1(function (block) { return "\n Cannot scrape knowledge\n\n The source:\n > ".concat(block(knowledgeSource.sourceContent
 .split('\n')
 .map(function (line) { return "> ".concat(line); })
 .join('\n')), "\n\n No scraper found for the mime type \"").concat(sourceHandler.mimeType, "\"\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
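Both hunks above extend the warning and error texts so that the offending knowledge source is quoted back as a Markdown blockquote. A sketch of that message-building idiom, using the callback form of the `spacetrim` package that the compiled code calls as `spaceTrim$1` (import form and exact wording are illustrative):

```ts
// Sketch of the quoting idiom used in the hunks above; the `spacetrim` callback form
// passes a `block()` helper that keeps multi-line content aligned inside the template.
import { spaceTrim } from 'spacetrim'; // <- import form assumed

function formatScrapeWarning(className: string, mimeType: string, sourceContent: string): string {
    return spaceTrim(
        (block) => `
            Cannot scrape knowledge from source despite the scraper \`${className}\` supports the mime type "${mimeType}".

            The source:
            ${block(
                sourceContent
                    .split('\n')
                    .map((line) => `> ${line}`) // <- quote every source line as a Markdown blockquote
                    .join('\n'),
            )}
        `,
    );
}
```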
@@ -3977,6 +3988,8 @@ function prepareTasks(pipeline, tools, options) {
 /**
 * Prepare pipeline from string (markdown) format to JSON format
 *
+* @see https://github.com/webgptorg/promptbook/discussions/196
+*
 * Note: This function does not validate logic of the pipeline
 * Note: This function acts as part of compilation process
 * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -3989,16 +4002,17 @@ function preparePipeline(pipeline, tools, options) {
 <- TODO: [🧠][🪑] `promptbookVersion` */
 knowledgeSources /*
 <- TODO: [🧊] `knowledgePieces` */, personas /*
-<- TODO: [🧊] `preparations` */, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+<- TODO: [🧊] `preparations` */, sources, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, title, collection, prepareTitleExecutor, _c, result, outputParameters, titleRaw, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+var _d;
 var _this = this;
-return __generator(this, function (
-switch (
+return __generator(this, function (_e) {
+switch (_e.label) {
 case 0:
 if (isPipelinePrepared(pipeline)) {
 return [2 /*return*/, pipeline];
 }
 rootDirname = options.rootDirname, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? DEFAULT_IS_VERBOSE : _b;
-parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas, sources = pipeline.sources;
 if (tools === undefined || tools.llm === undefined) {
 throw new MissingToolsError('LLM tools are required for preparing the pipeline');
 }
@@ -4016,6 +4030,40 @@ function preparePipeline(pipeline, tools, options) {
 // <- TODO: [🧊]
 currentPreparation,
 ];
+title = pipeline.title;
+if (!(title === undefined || title === '' || title === DEFAULT_BOOK_TITLE)) return [3 /*break*/, 3];
+collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
+_c = createPipelineExecutor;
+_d = {};
+return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+case 1:
+prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
+_d.tools = tools,
+_d)]);
+return [4 /*yield*/, prepareTitleExecutor({
+book: sources
+.map(function (_a) {
+var content = _a.content;
+return content;
+})
+.join('\n\n')
+// TODO: !!!!!!! Parameters in parameters - DO NOT ALLOW, ESCAPE:
+.split('{')
+.join('[')
+.split('}')
+.join(']'),
+})];
+case 2:
+result = _e.sent();
+assertsExecutionSuccessful(result);
+outputParameters = result.outputParameters;
+titleRaw = outputParameters.title;
+if (isVerbose) {
+console.info("The title is \"".concat(titleRaw, "\""));
+}
+title = titleRaw || DEFAULT_BOOK_TITLE;
+_e.label = 3;
+case 3:
 preparedPersonas = new Array(personas.length);
 return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
 var modelRequirements, preparedPersona;
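The block above is transpiled generator code, which makes the new control flow hard to follow. De-transpiled into async/await, the added step does roughly this (all identifiers come from the compiled code above; the async/await shape and loose typings are an illustrative sketch, not the package source):

```ts
// De-transpiled sketch of the new title-preparation step in `preparePipeline`;
// identifiers (createCollectionFromJson, PipelineCollection, createPipelineExecutor,
// assertsExecutionSuccessful, DEFAULT_BOOK_TITLE) are the ones used in the bundle above.
async function prepareTitleStep(pipeline: any, tools: any, isVerbose: boolean): Promise<string> {
    let title = pipeline.title;

    if (title === undefined || title === '' || title === DEFAULT_BOOK_TITLE) {
        const collection = createCollectionFromJson(...PipelineCollection);
        const prepareTitleExecutor = createPipelineExecutor({
            pipeline: await collection.getPipelineByUrl(
                'https://promptbook.studio/promptbook/prepare-title.book.md',
            ),
            tools,
        });

        const result = await prepareTitleExecutor({
            book: pipeline.sources
                .map(({ content }: { content: string }) => content)
                .join('\n\n')
                // TODO in the source: parameters-in-parameters are not allowed, so `{` and `}` are escaped:
                .split('{')
                .join('[')
                .split('}')
                .join(']'),
        });
        assertsExecutionSuccessful(result);

        const titleRaw = result.outputParameters.title;
        if (isVerbose) {
            console.info(`The title is "${titleRaw}"`);
        }
        title = titleRaw || DEFAULT_BOOK_TITLE;
    }

    return title;
}
```

The generated title is then written back into the prepared pipeline (`title: title` in the `$exportJson` hunk further below).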
@@ -4034,12 +4082,12 @@ function preparePipeline(pipeline, tools, options) {
 }
 });
 }); })];
-case
-
+case 4:
+_e.sent();
 knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
 return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
-case
-partialknowledgePiecesPrepared =
+case 5:
+partialknowledgePiecesPrepared = _e.sent();
 knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
 return [4 /*yield*/, prepareTasks({
 parameters: parameters,
@@ -4050,8 +4098,8 @@ function preparePipeline(pipeline, tools, options) {
 maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
 isVerbose: isVerbose,
 })];
-case
-tasksPrepared = (
+case 6:
+tasksPrepared = (_e.sent()).tasksPrepared;
 // ----- /Tasks preparation -----
 // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
 // Note: Count total usage
@@ -4062,7 +4110,7 @@ function preparePipeline(pipeline, tools, options) {
 order: ORDER_OF_PIPELINE_JSON,
 value: __assign(__assign({}, pipeline), {
 // <- TODO: Probbably deeply clone the pipeline because `$exportJson` freezes the subobjects
-knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
+title: title, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
 // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
 personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
 })];
@@ -6117,6 +6165,7 @@ var markdownScraperMetadata = $deepFreeze({
 mimeTypes: ['text/markdown', 'text/plain'],
 documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
 isAvilableInBrowser: true,
+// <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
 requiredExecutables: [],
 }); /* <- Note: [🤛] */
 /**
@@ -6315,6 +6364,7 @@ var documentScraperMetadata = $deepFreeze({
 mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
 documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
 isAvilableInBrowser: false,
+// <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
 requiredExecutables: ['Pandoc'],
 }); /* <- Note: [🤛] */
 /**
@@ -6479,6 +6529,7 @@ var legacyDocumentScraperMetadata = $deepFreeze({
 mimeTypes: ['application/msword', 'text/rtf'],
 documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
 isAvilableInBrowser: false,
+// <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
 requiredExecutables: [
 'Pandoc',
 'LibreOffice',
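All three scraper-metadata hunks add the same `[🌏]` note next to `isAvilableInBrowser` (spelling as in the package): only the Markdown scraper is meant to run in a browser, and other sources should be scraped via a remote server. A hypothetical helper, sketched only from the fields visible in these metadata objects:

```ts
// Hypothetical helper, built only on fields visible in the metadata objects above
// (`mimeTypes`, `isAvilableInBrowser` - the package's own spelling, `requiredExecutables`).
type ScraperMetadata = {
    mimeTypes: string[];
    isAvilableInBrowser: boolean;
    requiredExecutables: string[];
};

function pickBrowserScraper(scrapers: ScraperMetadata[], mimeType: string): ScraperMetadata | undefined {
    // Per the [🌏] note, only the Markdown scraper should match in a browser;
    // everything else is expected to go through a remote server.
    return scrapers.find((scraper) => scraper.isAvilableInBrowser && scraper.mimeTypes.includes(mimeType));
}
```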