@promptbook/remote-server 0.94.0-7 → 0.95.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -21
- package/esm/index.es.js +36 -36
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +25 -14
- package/umd/index.umd.js +36 -36
- package/umd/index.umd.js.map +1 -1
@@ -10,7 +10,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
     className: string;
     mimeTypes: string[];
     documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
-
+    isAvailableInBrowser: false;
     requiredExecutables: never[];
 }>;
 /**
@@ -19,7 +19,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
 * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
 *
 * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
 * @public exported from `@promptbook/cli`
 */
 export declare const _WebsiteScraperMetadataRegistration: Registration;
@@ -658,7 +658,7 @@ export type number_seed = number_percent;
 * - ❤ is equivalent to more than 1
 */
 export type number_likeness = number;
-export type
+export type number_milliseconds = number_integer;
 export type number_seconds = number;
 export type number_minutes = number;
 export type number_hours = number;
@@ -11,7 +11,7 @@ import type { string_filename } from '../../types/typeAliases';
 */
 export declare function listAllFiles(path: string_dirname, isRecursive: boolean, fs: FilesystemTools): Promise<Array<string_filename>>;
 /**
- * TODO: [😶] Unite
+ * TODO: [😶] Unite folder listing
 * Note: Not [~🟢~] because it is not directly dependent on `fs
 * TODO: [🖇] What about symlinks?
 */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
 * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.94.0
+ * It follows semantic versioning (e.g., `0.94.0`).
 *
 * @generated
 */
@@ -5,9 +5,9 @@ import type { PrepareAndScrapeOptions } from '../prepare/PrepareAndScrapeOptions
 import type { string_filename } from '../types/typeAliases';
 import type { string_pipeline_url } from '../types/typeAliases';
 /**
- * @see ./
+ * @see ./wizard.ts `getPipeline` method
 *
- * @private usable through `ptbk run` and `@
+ * @private usable through `ptbk run` and `@promptbook/wizard`
 */
 export declare function $getCompiledBook(tools: Required<Pick<ExecutionTools, 'fs' | 'fetch'>>, pipelineSource: string_filename | string_pipeline_url | PipelineString, options?: PrepareAndScrapeOptions): Promise<PipelineJson>;
 /**
@@ -7,14 +7,14 @@ import type { string_filename } from '../types/typeAliases';
 import type { string_parameter_value } from '../types/typeAliases';
 import type { string_pipeline_url } from '../types/typeAliases';
 /**
- *
- * Look at `
+ * Wizard for simple usage of the Promptbook
+ * Look at `wizard` for more details
 *
 * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
 *
 * @private just for single instance
 */
-declare class
+declare class Wizard {
     /**
     * Run the book
     *
@@ -53,14 +53,14 @@ declare class Wizzard {
     getCompiledBook(pipelineSource: string_filename | string_pipeline_url | PipelineString): Promise<PipelineJson>;
 }
 /**
- *
+ * Wizard for simple usage of the Promptbook
 *
 * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
 *
 * @singleton
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
 */
-export declare const
+export declare const wizard: Wizard;
 export {};
 /**
 * TODO: [🧠] Maybe some way how to handle the progress and streaming?
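For orientation, the renamed `wizard` singleton declared above can be used roughly like this (a minimal sketch based only on the typings in this diff; the pipeline path is a hypothetical placeholder):

```ts
// Minimal sketch, assuming only the declarations shown above:
// `wizard` is the singleton instance of `Wizard` exported from `@promptbook/wizard`
// and works only in a Node.js environment.
import { wizard } from '@promptbook/wizard';

async function main() {
    // `getCompiledBook` accepts a filename, a pipeline URL or a pipeline string;
    // './my-pipeline.book' is a hypothetical placeholder path.
    const pipeline = await wizard.getCompiledBook('./my-pipeline.book');
    console.log(pipeline.title); // <- PipelineJson fields as seen elsewhere in this diff
}

main();
```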
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/remote-server",
-    "version": "0.
+    "version": "0.95.0",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -36,6 +36,29 @@
         "o1-mini",
         "o1-preview",
         "anthropic",
+        "claude",
+        "claude-3",
+        "claude-3-opus",
+        "claude-3-sonnet",
+        "claude-3-haiku",
+        "gemini",
+        "gemini-pro",
+        "gemini-flash",
+        "mixtral",
+        "mistral",
+        "ollama",
+        "ai-orchestration",
+        "prompt-engineering",
+        "llmops",
+        "multimodal",
+        "reasoning",
+        "rag",
+        "embeddings",
+        "function-calling",
+        "large-language-models",
+        "ai-application-framework",
+        "text-generation",
+        "ai-agents",
         "LLMOps"
     ],
     "license": "BUSL-1.1",
@@ -47,23 +70,11 @@
         "node": ">=16.0.0",
         "npm": ">=8.0.0"
     },
-    "cspell": {
-        "version": "0.2",
-        "language": "en",
-        "ignorePaths": [
-            "node_modules",
-            ".next",
-            "coverage",
-            "dist",
-            ".git"
-        ],
-        "words": []
-    },
     "main": "./umd/index.umd.js",
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.
+        "@promptbook/core": "0.95.0"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
 * @generated
 * @see https://github.com/webgptorg/promptbook
 */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.95.0';
 /**
 * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
 * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1241,7 +1241,7 @@
         else {
             for (const [subName, subValue] of Object.entries(value)) {
                 if (subValue === undefined) {
-                    // Note: undefined in object is serializable - it is just
+                    // Note: undefined in object is serializable - it is just omitted
                     continue;
                 }
                 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
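As an aside, the completed comment above describes standard `JSON.stringify` behaviour: object properties whose value is `undefined` are simply dropped during serialization. A tiny illustration (not code from the package):

```ts
// `undefined` object properties are omitted when serializing to JSON,
// which is why the check above can safely `continue` past them.
const value = { kept: 1, dropped: undefined };
console.log(JSON.stringify(value)); // -> {"kept":1}
```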
@@ -2187,7 +2187,7 @@
 * Note: [💞] Ignore a discrepancy between file name and entity name
 */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
 * Checks if value is valid email
@@ -2307,7 +2307,7 @@
         });
     }
     catch (error) {
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.error('There was an error with prettifying the markdown, using the original as the fallback', {
             error,
             html: content,
@@ -2572,7 +2572,7 @@
 
                 Note: You have probably forgotten to run "ptbk make" to update the collection
                 Note: Pipelines with the same URL are not allowed
-                Only
+                Only exception is when the pipelines are identical
 
             `));
         }
@@ -2725,12 +2725,12 @@
         get title() {
             return `${llmTools.title} (+usage)`;
             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-            // <- TODO: [🧈][🧠] Does it make
+            // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
         },
         get description() {
             return `${llmTools.description} (+usage)`;
             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-            // <- TODO: [🧈][🧠] Does it make
+            // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
         },
         checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
@@ -2957,13 +2957,13 @@
 
             Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
         `);
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.warn(warningMessage);
         // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
         /*
         return {
             async listModels() {
-                // TODO: [🟥] Detect browser / node and make it
+                // TODO: [🟥] Detect browser / node and make it colorful
                 console.warn(
                     spaceTrim(
                         (block) => `
@@ -3239,17 +3239,17 @@
 * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
 */
    const all = [];
-    for (const { packageName, className, mimeTypes, documentationUrl,
+    for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
        if (all.some((item) => item.packageName === packageName && item.className === className)) {
            continue;
        }
-        all.push({ packageName, className, mimeTypes, documentationUrl,
+        all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
    }
-    for (const { packageName, className, mimeTypes, documentationUrl,
+    for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
        if (all.some((item) => item.packageName === packageName && item.className === className)) {
            continue;
        }
-        all.push({ packageName, className, mimeTypes, documentationUrl,
+        all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
    }
    for (const { metadata } of availableScrapers) {
        all.push(metadata);
@@ -3261,8 +3261,8 @@
        const isInstalled = $scrapersRegister
            .list()
            .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
-        const
-        return { ...metadata, isMetadataAviailable, isInstalled,
+        const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+        return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
    });
    if (metadata.length === 0) {
        return spaceTrim__default["default"](`
@@ -3275,7 +3275,7 @@
    return spaceTrim__default["default"]((block) => `
        Available scrapers are:
        ${block(metadata
-            .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+            .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
            const more = [];
            // TODO: [🧠] Maybe use `documentationUrl`
            if (isMetadataAviailable) {
@@ -3284,16 +3284,16 @@
            if (isInstalled) {
                more.push(`🟩 Installed`);
            } // not else
-            if (
+            if (isAvailableInTools) {
                more.push(`🟦 Available in tools`);
            } // not else
            if (!isMetadataAviailable && isInstalled) {
                more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
            } // not else
-            if (!isInstalled &&
+            if (!isInstalled && isAvailableInTools) {
                more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
            } // not else
-            if (!
+            if (!isAvailableInBrowser) {
                more.push(`Not usable in browser`);
            }
            const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -4020,7 +4020,7 @@
 /**
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
 *       Put `knowledgePieces` into `PrepareKnowledgeOptions`
- * TODO: [🪂] More than max things can run in parallel by
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
 * TODO: [🧠][❎] Do here proper M:N mapping
 *       [x] One source can make multiple pieces
 *       [ ] One piece can have multiple sources
@@ -5709,10 +5709,10 @@
 */
 async function getKnowledgeForTask(options) {
    const { tools, preparedPipeline, task, parameters } = options;
-    const
-    const
+    const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+    const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
-    if (
+    if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
        return ''; // <- Note: Np knowledge present, return empty string
    }
    try {
@@ -5723,7 +5723,7 @@
            title: 'Knowledge Search',
            modelRequirements: {
                modelVariant: 'EMBEDDING',
-                modelName:
+                modelName: firstKnowledgeIndex.modelName,
            },
            content: task.content,
            parameters,
@@ -5731,7 +5731,7 @@
        const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
        const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
            const { index } = knowledgePiece;
-            const knowledgePieceIndex = index.find((i) => i.modelName ===
+            const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
            // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
            if (knowledgePieceIndex === undefined) {
                return {
@@ -5752,8 +5752,8 @@
            task,
            taskEmbeddingPrompt,
            taskEmbeddingResult,
-
-
+            firstKnowledgePiece,
+            firstKnowledgeIndex,
            knowledgePiecesWithRelevance,
            knowledgePiecesSorted,
            knowledgePiecesLimited,
@@ -5822,7 +5822,7 @@
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeTask(options) {
-    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
    const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
    // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
    const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5910,7 +5910,7 @@
        cacheDirname,
        intermediateFilesStrategy,
        isAutoInstalled,
-
+        isNotPreparedWarningSuppressed,
    });
    await onProgress({
        outputParameters: {
@@ -6005,7 +6005,7 @@
    }
    return exportJson({
        name: `executionReport`,
-        message: `
+        message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
        order: [],
        value: {
            isSuccessful: false,
@@ -6042,7 +6042,7 @@
    return exportJson({
        name: 'pipelineExecutorResult',
        message: spaceTrim.spaceTrim((block) => `
-
+            Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
 
            ${block(pipelineIdentification)}
        `),
@@ -6183,7 +6183,7 @@
    }
    return exportJson({
        name: 'pipelineExecutorResult',
-        message: `
+        message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
        order: [],
        value: {
            isSuccessful: false,
@@ -6234,7 +6234,7 @@
 * @public exported from `@promptbook/core`
 */
 function createPipelineExecutor(options) {
-    const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+    const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
    validatePipeline(pipeline);
    const pipelineIdentification = (() => {
        // Note: This is a 😐 implementation of [🚞]
@@ -6251,7 +6251,7 @@
    if (isPipelinePrepared(pipeline)) {
        preparedPipeline = pipeline;
    }
-    else if (
+    else if (isNotPreparedWarningSuppressed !== true) {
        console.warn(spaceTrim.spaceTrim((block) => `
            Pipeline is not prepared
 
@@ -6284,7 +6284,7 @@
        maxParallelCount,
        csvSettings,
        isVerbose,
-
+        isNotPreparedWarningSuppressed,
        rootDirname,
        cacheDirname,
        intermediateFilesStrategy,
@@ -6293,7 +6293,7 @@
    assertsError(error);
    return exportJson({
        name: 'pipelineExecutorResult',
-        message: `
+        message: `Unsuccessful PipelineExecutorResult, last catch`,
        order: [],
        value: {
            isSuccessful: false,