@promptbook/remote-server 0.94.0-7 → 0.95.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -21
- package/esm/index.es.js +36 -36
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +25 -14
- package/umd/index.umd.js +36 -36
- package/umd/index.umd.js.map +1 -1
package/README.md
CHANGED
@@ -25,10 +25,6 @@ Write AI applications using plain human language across multiple models and plat
 
 
 
-<blockquote style="color: #ff8811">
-<b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
-</blockquote>
-
 ## 📦 Package `@promptbook/remote-server`
 
 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).

@@ -56,17 +52,15 @@ Rest of the documentation is common for **entire promptbook ecosystem**:
 
 ## 🤍 The Book Abstract
 
-**It's time for a paradigm shift! The future of software is in plain English, French or Latin.**
+**It's time for a paradigm shift! The future of software is written in plain English, French, or Latin.**
 
 During the computer revolution, we have seen [multiple generations of computer languages](https://github.com/webgptorg/promptbook/discussions/180), from the physical rewiring of the vacuum tubes through low-level machine code to the high-level languages like Python or JavaScript. And now, we're on the edge of the **next revolution**!
 
-
-
 It's a revolution of writing software in **plain human language** that is understandable and executable by both humans and machines – and it's going to change everything!
 
 The incredible growth in power of microprocessors and the Moore's Law have been the driving force behind the ever-more powerful languages, and it's been an amazing journey! Similarly, the large language models (like GPT or Claude) are the next big thing in language technology, and they're set to transform the way we interact with computers.
 
-This shift
+This shift will happen whether we're ready or not. Our mission is to make it excellent, not just good.
 
 **Join us in this journey!**
 

@@ -187,9 +181,6 @@ Join our growing community of developers and users:
 
 _A concise, Markdown-based DSL for crafting AI workflows and automations._
 
-
-
-
 ### Introduction
 
 Book is a Markdown-based language that simplifies the creation of AI applications, workflows, and automations. With human-readable commands, you can define inputs, outputs, personas, knowledge sources, and actions—without needing model-specific details.

@@ -217,7 +208,7 @@ Book is a Markdown-based language that simplifies the creation of AI application
 → {article}
 ```
 
-Each part of the book defines one of
+Each part of the book defines one of three circles:
 
 ### **1. What:** Workflows, Tasks and Parameters
 

@@ -239,8 +230,6 @@ Personas can have access to different knowledge, tools and actions. They can als
 
 - [PERSONA](https://github.com/webgptorg/promptbook/blob/main/documents/commands/PERSONA.md)
 
-
-
 ### **3. How:** Knowledge, Instruments and Actions
 
 The resources used by the personas are used to do the work.

@@ -255,9 +244,9 @@ The resources used by the personas are used to do the work.
 
 Book language is based on markdown. It is subset of markdown. It is designed to be easy to read and write. It is designed to be understandable by both humans and machines and without specific knowledge of the language.
 
-The file has `.book` extension
+The file has a `.book` extension and uses UTF-8 encoding without BOM.
 
-
+Books have two variants: flat — just a prompt without structure, and full — with tasks, commands, and prompts.
 
 As it is source code, it can leverage all the features of version control systems like git and does not suffer from the problems of binary formats, proprietary formats, or no-code solutions.
 

@@ -288,13 +277,13 @@ Or you can install them separately:
 
 - ⭐ **[ptbk](https://www.npmjs.com/package/ptbk)** - Bundle of all packages, when you want to install everything and you don't care about the size
 - **[promptbook](https://www.npmjs.com/package/promptbook)** - Same as `ptbk`
-- ⭐🧙♂️ **[@promptbook/
+- ⭐🧙♂️ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard to just run the books in node without any struggle
 - **[@promptbook/core](https://www.npmjs.com/package/@promptbook/core)** - Core of the library, it contains the main logic for promptbooks
 - **[@promptbook/node](https://www.npmjs.com/package/@promptbook/node)** - Core of the library for Node.js environment
 - **[@promptbook/browser](https://www.npmjs.com/package/@promptbook/browser)** - Core of the library for browser environment
 - ⭐ **[@promptbook/utils](https://www.npmjs.com/package/@promptbook/utils)** - Utility functions used in the library but also useful for individual use in preprocessing and postprocessing LLM inputs and outputs
 - **[@promptbook/markdown-utils](https://www.npmjs.com/package/@promptbook/markdown-utils)** - Utility functions used for processing markdown
-- _(Not finished)_ **[@promptbook/
+- _(Not finished)_ **[@promptbook/wizard](https://www.npmjs.com/package/@promptbook/wizard)** - Wizard for creating+running promptbooks in single line
 - **[@promptbook/javascript](https://www.npmjs.com/package/@promptbook/javascript)** - Execution tools for javascript inside promptbooks
 - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
 - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK

@@ -338,9 +327,7 @@ The following glossary is used to clarify certain concepts:
 - **Retrieval-augmented generation** is a machine learning paradigm where a model generates text by retrieving relevant information from a large database of text. This approach combines the benefits of generative models and retrieval models.
 - **Longtail** refers to non-common or rare events, items, or entities that are not well-represented in the training data of machine learning models. Longtail items are often challenging for models to predict accurately.
 
-_Note: This section is not complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
-
-
+_Note: This section is not a complete dictionary, more list of general AI / LLM terms that has connection with Promptbook_
 
 ### 💯 Core concepts
 
package/esm/index.es.js
CHANGED
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
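The only functional change in this hunk is the engine version bump to `0.95.0`; the hunk context shows `BOOK_LANGUAGE_VERSION` still at `1.0.0`. A minimal sketch of logging which engine version is actually loaded, assuming the constants are re-exported from the package entry point (the constants themselves are defined near the top of the bundle above, and `src/version.d.ts` appears in the typings list):

```typescript
// Minimal sketch, not from the diff: report the loaded engine and language versions.
// Assumption: PROMPTBOOK_ENGINE_VERSION and BOOK_LANGUAGE_VERSION are re-exported
// from this package's entry point.
import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '@promptbook/remote-server';

console.info(`Promptbook engine ${PROMPTBOOK_ENGINE_VERSION} (Book language ${BOOK_LANGUAGE_VERSION})`);
```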
@@ -1226,7 +1226,7 @@ function checkSerializableAsJson(options) {
 else {
 for (const [subName, subValue] of Object.entries(value)) {
 if (subValue === undefined) {
-// Note: undefined in object is serializable - it is just
+// Note: undefined in object is serializable - it is just omitted
 continue;
 }
 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
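The completed comment records why `undefined` members can be skipped: `JSON.stringify` drops them, so they cannot break serialization. A one-line illustration:

```typescript
// `undefined` properties are simply omitted by JSON.stringify, which is why the
// check above can `continue` past them.
console.log(JSON.stringify({ kept: 1, dropped: undefined })); // -> {"kept":1}
```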
@@ -2172,7 +2172,7 @@ const UNCERTAIN_USAGE = $deepFreeze({
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
  * Checks if value is valid email
@@ -2292,7 +2292,7 @@ function prettifyMarkdown(content) {
 });
 }
 catch (error) {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.error('There was an error with prettifying the markdown, using the original as the fallback', {
 error,
 html: content,
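The completed TODO ("make it colorful") implies detecting Node vs. browser before enabling colored console output in this fallback path. A generic probe such a check could build on (illustration only, not Promptbook code):

```typescript
// Generic environment probe (an assumption, not from this package): Node exposes
// process.versions.node, browsers do not.
declare const process: { versions?: { node?: string } } | undefined; // ambient in Node, absent in browsers

const isNode = typeof process !== 'undefined' && typeof process?.versions?.node === 'string';
```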
@@ -2557,7 +2557,7 @@ class SimplePipelineCollection {
 
 Note: You have probably forgotten to run "ptbk make" to update the collection
 Note: Pipelines with the same URL are not allowed
-Only
+Only exception is when the pipelines are identical
 
 `));
 }

@@ -2710,12 +2710,12 @@ function countUsage(llmTools) {
 get title() {
 return `${llmTools.title} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 get description() {
 return `${llmTools.description} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 checkConfiguration() {
 return /* not await */ llmTools.checkConfiguration();
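The completed TODOs ask whether the `(+usage)` suffix makes sense when wrapping `LlmExecutionTools`. The wrapping itself is an ordinary decorator: getters that delegate to the wrapped tools and append a suffix. A reduced, self-contained illustration of that pattern (simplified placeholder types, not the library's interfaces):

```typescript
// Reduced illustration of the wrapping pattern used by countUsage above.
type Describable = { title: string; description: string };

function withSuffix(wrapped: Describable, suffix: string): Describable {
    return {
        get title() {
            return `${wrapped.title} ${suffix}`;
        },
        get description() {
            return `${wrapped.description} ${suffix}`;
        },
    };
}

// withSuffix({ title: 'OpenAI', description: 'OpenAI tools' }, '(+usage)').title === 'OpenAI (+usage)'
```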
@@ -2942,13 +2942,13 @@ function joinLlmExecutionTools(...llmExecutionTools) {
 
 Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
 `);
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(warningMessage);
 // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 /*
 return {
 async listModels() {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(
 spaceTrim(
 (block) => `
@@ -3224,17 +3224,17 @@ function $registeredScrapersMessage(availableScrapers) {
 * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
 */
 const all = [];
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
 for (const { metadata } of availableScrapers) {
 all.push(metadata);

@@ -3246,8 +3246,8 @@ function $registeredScrapersMessage(availableScrapers) {
 const isInstalled = $scrapersRegister
 .list()
 .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
-const
-return { ...metadata, isMetadataAviailable, isInstalled,
+const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
 });
 if (metadata.length === 0) {
 return spaceTrim(`

@@ -3260,7 +3260,7 @@ function $registeredScrapersMessage(availableScrapers) {
 return spaceTrim((block) => `
 Available scrapers are:
 ${block(metadata
-.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
 const more = [];
 // TODO: [🧠] Maybe use `documentationUrl`
 if (isMetadataAviailable) {

@@ -3269,16 +3269,16 @@ function $registeredScrapersMessage(availableScrapers) {
 if (isInstalled) {
 more.push(`🟩 Installed`);
 } // not else
-if (
+if (isAvailableInTools) {
 more.push(`🟦 Available in tools`);
 } // not else
 if (!isMetadataAviailable && isInstalled) {
 more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
 } // not else
-if (!isInstalled &&
+if (!isInstalled && isAvailableInTools) {
 more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
 } // not else
-if (!
+if (!isAvailableInBrowser) {
 more.push(`Not usable in browser`);
 }
 const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
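These hunks surface two flags per registered scraper: `isAvailableInBrowser`, carried through from the metadata register, and `isAvailableInTools`, computed against the scrapers actually available in the current tools. A sketch of the record shape the loops consume, inferred only from the destructuring above (field names are grounded in the hunks; everything else, including optionality, is an assumption, and the authoritative type is `ScraperAndConverterMetadata.d.ts` from the file list):

```typescript
// Inferred sketch of one entry handled by $registeredScrapersMessage; not the published type.
type RegisteredScraperEntry = {
    packageName: string;
    className: string;
    mimeTypes: ReadonlyArray<string>;
    documentationUrl: string;
    isAvailableInBrowser: boolean; // <- newly included in the registry listing
    isMetadataAviailable?: boolean; // <- spelling kept as it appears in the bundle
    isInstalled?: boolean;
    isAvailableInTools?: boolean; // <- newly computed against the available scrapers
};
```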
@@ -4005,7 +4005,7 @@ TODO: [🧊] This is how it can look in future
 /**
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
- * TODO: [🪂] More than max things can run in parallel by
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
  * TODO: [🧠][❎] Do here proper M:N mapping
  * [x] One source can make multiple pieces
  * [ ] One piece can have multiple sources
@@ -5694,10 +5694,10 @@ function knowledgePiecesToString(knowledgePieces) {
 */
 async function getKnowledgeForTask(options) {
 const { tools, preparedPipeline, task, parameters } = options;
-const
-const
+const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
 // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
-if (
+if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
 return ''; // <- Note: Np knowledge present, return empty string
 }
 try {

@@ -5708,7 +5708,7 @@ async function getKnowledgeForTask(options) {
 title: 'Knowledge Search',
 modelRequirements: {
 modelVariant: 'EMBEDDING',
-modelName:
+modelName: firstKnowledgeIndex.modelName,
 },
 content: task.content,
 parameters,

@@ -5716,7 +5716,7 @@ async function getKnowledgeForTask(options) {
 const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
 const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
 const { index } = knowledgePiece;
-const knowledgePieceIndex = index.find((i) => i.modelName ===
+const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
 // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
 if (knowledgePieceIndex === undefined) {
 return {

@@ -5737,8 +5737,8 @@ async function getKnowledgeForTask(options) {
 task,
 taskEmbeddingPrompt,
 taskEmbeddingResult,
-
-
+firstKnowledgePiece,
+firstKnowledgeIndex,
 knowledgePiecesWithRelevance,
 knowledgePiecesSorted,
 knowledgePiecesLimited,
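The refactor names the locals (`firstKnowledgePiece`, `firstKnowledgeIndex`), uses the first piece's first index entry to pick the embedding model, and then scores every knowledge piece against the task embedding. A standalone sketch of that ranking idea, for orientation only: the scoring function and the index's vector field are not shown in this diff, so the shapes below are assumptions, not the library's types.

```typescript
// Illustrative ranking sketch, not library code: score knowledge pieces against a task
// embedding for the model picked from the first piece's first index entry.
type KnowledgeIndex = { modelName: string; embedding: ReadonlyArray<number> }; // <- assumed shape
type KnowledgePiece = { content: string; index: ReadonlyArray<KnowledgeIndex> };

function cosineSimilarity(a: ReadonlyArray<number>, b: ReadonlyArray<number>): number {
    const dot = a.reduce((sum, value, i) => sum + value * (b[i] ?? 0), 0);
    const norm = (v: ReadonlyArray<number>) => Math.sqrt(v.reduce((sum, value) => sum + value * value, 0));
    return dot / (norm(a) * norm(b) || 1);
}

function rankByRelevance(
    taskEmbedding: ReadonlyArray<number>,
    pieces: ReadonlyArray<KnowledgePiece>,
    modelName: string,
) {
    return pieces
        .map((piece) => {
            const pieceIndex = piece.index.find((i) => i.modelName === modelName);
            const relevance = pieceIndex ? cosineSimilarity(taskEmbedding, pieceIndex.embedding) : 0;
            return { piece, relevance };
        })
        .sort((a, b) => b.relevance - a.relevance);
}
```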
@@ -5807,7 +5807,7 @@ async function getReservedParametersForTask(options) {
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeTask(options) {
-const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
 const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
 // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
 const usedParameterNames = extractParameterNamesFromTask(currentTask);

@@ -5895,7 +5895,7 @@ async function executeTask(options) {
 cacheDirname,
 intermediateFilesStrategy,
 isAutoInstalled,
-
+isNotPreparedWarningSuppressed,
 });
 await onProgress({
 outputParameters: {

@@ -5990,7 +5990,7 @@ async function executePipeline(options) {
 }
 return exportJson({
 name: `executionReport`,
-message: `
+message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
 order: [],
 value: {
 isSuccessful: false,

@@ -6027,7 +6027,7 @@ async function executePipeline(options) {
 return exportJson({
 name: 'pipelineExecutorResult',
 message: spaceTrim$1((block) => `
-
+Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
 
 ${block(pipelineIdentification)}
 `),

@@ -6168,7 +6168,7 @@ async function executePipeline(options) {
 }
 return exportJson({
 name: 'pipelineExecutorResult',
-message: `
+message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
 order: [],
 value: {
 isSuccessful: false,

@@ -6219,7 +6219,7 @@ async function executePipeline(options) {
 * @public exported from `@promptbook/core`
 */
 function createPipelineExecutor(options) {
-const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
 validatePipeline(pipeline);
 const pipelineIdentification = (() => {
 // Note: This is a 😐 implementation of [🚞]

@@ -6236,7 +6236,7 @@ function createPipelineExecutor(options) {
 if (isPipelinePrepared(pipeline)) {
 preparedPipeline = pipeline;
 }
-else if (
+else if (isNotPreparedWarningSuppressed !== true) {
 console.warn(spaceTrim$1((block) => `
 Pipeline is not prepared
 

@@ -6269,7 +6269,7 @@ function createPipelineExecutor(options) {
 maxParallelCount,
 csvSettings,
 isVerbose,
-
+isNotPreparedWarningSuppressed,
 rootDirname,
 cacheDirname,
 intermediateFilesStrategy,

@@ -6278,7 +6278,7 @@ function createPipelineExecutor(options) {
 assertsError(error);
 return exportJson({
 name: 'pipelineExecutorResult',
-message: `
+message: `Unsuccessful PipelineExecutorResult, last catch`,
 order: [],
 value: {
 isSuccessful: false,
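Taken together, the executor hunks thread a new `isNotPreparedWarningSuppressed` option (default `false`) from `createPipelineExecutor` down into `executeTask`; setting it to `true` silences the "Pipeline is not prepared" warning shown above. A minimal usage sketch, assuming only what the hunks show (a single options object, and the JSDoc marking the function as exported from `@promptbook/core`); the pipeline and tools values are placeholders whose concrete types are not part of this diff:

```typescript
// Minimal sketch of the new option; `pipeline` and `tools` depend on your setup.
import { createPipelineExecutor } from '@promptbook/core';

type ExecutorOptions = Parameters<typeof createPipelineExecutor>[0];
declare const pipeline: ExecutorOptions['pipeline'];
declare const tools: ExecutorOptions['tools'];

const executor = createPipelineExecutor({
    pipeline,
    tools,
    isNotPreparedWarningSuppressed: true, // <- new in this release; default is false
});
```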