@promptbook/node 0.94.0 → 0.98.0-2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -2
- package/esm/index.es.js +44 -44
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -2
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +2 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +2 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +2 -14
- package/umd/index.umd.js +44 -44
- package/umd/index.umd.js.map +1 -1

package/esm/typings/src/scrapers/website/register-metadata.d.ts
CHANGED

@@ -10,7 +10,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
     className: string;
     mimeTypes: string[];
     documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
-
+    isAvailableInBrowser: false;
     requiredExecutables: never[];
 }>;
 /**
@@ -19,7 +19,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
 export declare const _WebsiteScraperMetadataRegistration: Registration;
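The new `isAvailableInBrowser` flag lets callers filter scrapers by runtime before picking one. A minimal sketch of such a filter, assuming only the metadata shape visible in this diff (the `ScraperMetadata` type name and the `filterBrowserScrapers` helper are illustrative, not part of the package API):

```ts
// Shape of one scraper metadata entry as it appears in this diff (illustrative local type, not a package export)
type ScraperMetadata = {
    packageName: string;
    className: string;
    mimeTypes: string[];
    documentationUrl: string;
    isAvailableInBrowser: boolean;
    requiredExecutables: string[];
};

// Keep only the scrapers that can run inside a browser bundle
function filterBrowserScrapers(metadataList: ReadonlyArray<ScraperMetadata>): ScraperMetadata[] {
    return metadataList.filter((metadata) => metadata.isAvailableInBrowser);
}
```

The same flag is threaded through the scraper registers in `index.umd.js` further down in this diff.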
package/esm/typings/src/types/typeAliases.d.ts
CHANGED

@@ -658,7 +658,7 @@ export type number_seed = number_percent;
  * - ❤ is equivalent to more than 1
  */
 export type number_likeness = number;
-export type
+export type number_milliseconds = number_integer;
 export type number_seconds = number;
 export type number_minutes = number;
 export type number_hours = number;
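The new `number_milliseconds` alias joins `number_seconds`, `number_minutes` and `number_hours`; like the other aliases it only documents the intended unit and compiles to a plain `number`. A small sketch of using it in a signature (the `delay` helper is illustrative, and the aliases are copied locally from the declarations above rather than imported):

```ts
// Local copies of the aliases so the sketch is self-contained; in the package they live in typeAliases.d.ts
type number_integer = number;
type number_milliseconds = number_integer;

// The alias makes the expected unit explicit in the signature without any runtime cost
function delay(duration: number_milliseconds): Promise<void> {
    return new Promise<void>((resolve) => {
        setTimeout(resolve, duration);
    });
}

// Usage: wait roughly one and a half seconds
delay(1500).then(() => console.info('done'));
```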
package/esm/typings/src/utils/files/listAllFiles.d.ts
CHANGED

@@ -11,7 +11,7 @@ import type { string_filename } from '../../types/typeAliases';
  */
 export declare function listAllFiles(path: string_dirname, isRecursive: boolean, fs: FilesystemTools): Promise<Array<string_filename>>;
 /**
- * TODO: [😶] Unite
+ * TODO: [😶] Unite folder listing
  * Note: Not [~🟢~] because it is not directly dependent on `fs
  * TODO: [🖇] What about symlinks?
  */
package/esm/typings/src/version.d.ts
CHANGED

@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.
+ * It follows semantic versioning (e.g., `0.98.0-1`).
  *
  * @generated
  */
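`PROMPTBOOK_ENGINE_VERSION` is a generated constant, so it can be used to sanity-check that the installed engine matches what a project was tested against. A minimal sketch; the import specifier and the pinned version are assumptions for illustration:

```ts
import { PROMPTBOOK_ENGINE_VERSION } from '@promptbook/core';
// ^ Assumption: the constant is re-exported from `@promptbook/core`; adjust the specifier to your setup

// Illustrative pin matching the version published in this diff
const EXPECTED_ENGINE_VERSION = '0.98.0-2';

if (PROMPTBOOK_ENGINE_VERSION !== EXPECTED_ENGINE_VERSION) {
    console.warn(`Expected Promptbook engine ${EXPECTED_ENGINE_VERSION}, found ${PROMPTBOOK_ENGINE_VERSION}`);
}
```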
package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts
CHANGED

@@ -5,9 +5,9 @@ import type { PrepareAndScrapeOptions } from '../prepare/PrepareAndScrapeOptions
 import type { string_filename } from '../types/typeAliases';
 import type { string_pipeline_url } from '../types/typeAliases';
 /**
- * @see ./
+ * @see ./wizard.ts `getPipeline` method
  *
- * @private usable through `ptbk run` and `@promptbook/wizzard`
+ * @private usable through `ptbk run` and `@promptbook/wizard`
  */
 export declare function $getCompiledBook(tools: Required<Pick<ExecutionTools, 'fs' | 'fetch'>>, pipelineSource: string_filename | string_pipeline_url | PipelineString, options?: PrepareAndScrapeOptions): Promise<PipelineJson>;
 /**
package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts}
CHANGED

@@ -7,14 +7,14 @@ import type { string_filename } from '../types/typeAliases';
 import type { string_parameter_value } from '../types/typeAliases';
 import type { string_pipeline_url } from '../types/typeAliases';
 /**
- *
- * Look at `
+ * Wizard for simple usage of the Promptbook
+ * Look at `wizard` for more details
  *
  * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
  *
  * @private just for single instance
  */
-declare class Wizzard {
+declare class Wizard {
     /**
      * Run the book
      *
@@ -53,14 +53,14 @@ declare class Wizzard {
     getCompiledBook(pipelineSource: string_filename | string_pipeline_url | PipelineString): Promise<PipelineJson>;
 }
 /**
- *
+ * Wizard for simple usage of the Promptbook
  *
  * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
  *
  * @singleton
- * @public exported from `@promptbook/wizzard`
+ * @public exported from `@promptbook/wizard`
  */
-export declare const wizzard: Wizzard;
+export declare const wizard: Wizard;
 export {};
 /**
  * TODO: [🧠] Maybe some way how to handle the progress and streaming?
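With the `Wizzard`/`wizzard` → `Wizard`/`wizard` rename, the singleton is consumed under the new name. A minimal usage sketch based only on the declarations above, assuming the singleton is importable from `@promptbook/wizard` as its `@public` annotation states (the book path is illustrative):

```ts
import { wizard } from '@promptbook/wizard';
// ^ Assumption: the renamed singleton is exported under this name, per the annotation in wizard.d.ts above

async function main() {
    // `getCompiledBook` accepts a filename, a pipeline URL or a pipeline string (see the class declaration above)
    const pipeline = await wizard.getCompiledBook('./books/my-workflow.book'); // <- illustrative path
    console.info(`Compiled: ${pipeline.title}`);
}

main().catch((error) => {
    console.error(error);
    process.exit(1);
});
```

Code written against the old `wizzard` export needs the corresponding rename.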
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.94.0",
+    "version": "0.98.0-2",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -70,23 +70,11 @@
         "node": ">=16.0.0",
         "npm": ">=8.0.0"
     },
-    "cspell": {
-        "version": "0.2",
-        "language": "en",
-        "ignorePaths": [
-            "node_modules",
-            ".next",
-            "coverage",
-            "dist",
-            ".git"
-        ],
-        "words": []
-    },
     "main": "./umd/index.umd.js",
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.94.0"
+        "@promptbook/core": "0.98.0-2"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED

@@ -46,7 +46,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.94.0';
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-2';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -212,11 +212,11 @@
 /*
 TODO: [🌃]
 /**
- * Id of application for the
+ * Id of application for the wizard when using remote server
  *
  * @public exported from `@promptbook/core`
  * /
-ex-port const
+ex-port const WIZARD_APP_ID: string_app_id = 'wizard';
 */
 /**
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
@@ -538,7 +538,7 @@
         else {
             for (const [subName, subValue] of Object.entries(value)) {
                 if (subValue === undefined) {
-                    // Note: undefined in object is serializable - it is just
+                    // Note: undefined in object is serializable - it is just omitted
                     continue;
                 }
                 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
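The completed comment refers to how JSON serialization treats `undefined` object properties: `JSON.stringify` simply drops them, so the check can safely skip them. A quick self-contained illustration of that behavior (the sample object is made up):

```ts
// `undefined` properties of an object disappear from the serialized output entirely
const sample = { name: 'Alice', nickname: undefined };

console.info(JSON.stringify(sample)); // -> {"name":"Alice"}
```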
@@ -1147,7 +1147,7 @@
  * @param fs Filesystem tools
  * @returns Pipelines loaded from the archive
  *
- * @private utility of
+ * @private utility of Promptbook
  */
 async function loadArchive(filePath, fs) {
     if (!filePath.endsWith('.bookc')) {
@@ -1169,7 +1169,7 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
  * Checks if value is valid email
@@ -1311,7 +1311,7 @@
         });
     }
     catch (error) {
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.error('There was an error with prettifying the markdown, using the original as the fallback', {
             error,
             html: content,
@@ -1602,7 +1602,7 @@
 
             Note: You have probably forgotten to run "ptbk make" to update the collection
             Note: Pipelines with the same URL are not allowed
-            Only
+            Only exception is when the pipelines are identical
 
         `));
     }
@@ -3091,13 +3091,13 @@
 
             Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
         `);
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.warn(warningMessage);
         // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
         /*
         return {
             async listModels() {
-                // TODO: [🟥] Detect browser / node and make it
+                // TODO: [🟥] Detect browser / node and make it colorful
                 console.warn(
                     spaceTrim(
                         (block) => `
@@ -4282,10 +4282,10 @@
  */
 async function getKnowledgeForTask(options) {
     const { tools, preparedPipeline, task, parameters } = options;
-    const
-    const
+    const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+    const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
     // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
-    if (
+    if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
@@ -4296,7 +4296,7 @@
             title: 'Knowledge Search',
             modelRequirements: {
                 modelVariant: 'EMBEDDING',
-                modelName:
+                modelName: firstKnowledgeIndex.modelName,
             },
             content: task.content,
             parameters,
@@ -4304,7 +4304,7 @@
         const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
         const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
             const { index } = knowledgePiece;
-            const knowledgePieceIndex = index.find((i) => i.modelName ===
+            const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
             // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
             if (knowledgePieceIndex === undefined) {
                 return {
@@ -4325,8 +4325,8 @@
             task,
             taskEmbeddingPrompt,
             taskEmbeddingResult,
-
-
+            firstKnowledgePiece,
+            firstKnowledgeIndex,
             knowledgePiecesWithRelevance,
             knowledgePiecesSorted,
             knowledgePiecesLimited,
@@ -4395,7 +4395,7 @@
  * @private internal utility of `createPipelineExecutor`
  */
 async function executeTask(options) {
-    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
     const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
     // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
     const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -4483,7 +4483,7 @@
             cacheDirname,
             intermediateFilesStrategy,
             isAutoInstalled,
-
+            isNotPreparedWarningSuppressed,
         });
         await onProgress({
             outputParameters: {
@@ -4578,7 +4578,7 @@
         }
         return exportJson({
             name: `executionReport`,
-            message: `
+            message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
             order: [],
             value: {
                 isSuccessful: false,
@@ -4615,7 +4615,7 @@
         return exportJson({
             name: 'pipelineExecutorResult',
             message: spaceTrim.spaceTrim((block) => `
-
+                Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
 
                 ${block(pipelineIdentification)}
             `),
@@ -4756,7 +4756,7 @@
         }
         return exportJson({
             name: 'pipelineExecutorResult',
-            message: `
+            message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
             order: [],
             value: {
                 isSuccessful: false,
@@ -4807,7 +4807,7 @@
  * @public exported from `@promptbook/core`
  */
 function createPipelineExecutor(options) {
-    const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+    const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
     validatePipeline(pipeline);
     const pipelineIdentification = (() => {
         // Note: This is a 😐 implementation of [🚞]
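The destructuring above introduces a new `isNotPreparedWarningSuppressed` option (default `false`), which, per the next hunk, silences the "Pipeline is not prepared" console warning. A minimal sketch of passing it, based only on the option names visible in this diff; obtaining a real `pipeline` and `tools` is out of scope here, so they are declared as placeholders:

```ts
import { createPipelineExecutor } from '@promptbook/core';
// ^ Grounded in the `@public exported from @promptbook/core` annotation above

// Placeholders for the sketch; in a real app these come from your pipeline collection and tool setup
declare const pipeline: Parameters<typeof createPipelineExecutor>[0]['pipeline'];
declare const tools: Parameters<typeof createPipelineExecutor>[0]['tools'];

const pipelineExecutor = createPipelineExecutor({
    pipeline,
    tools,
    isNotPreparedWarningSuppressed: true, // <- new in this release: skip the "Pipeline is not prepared" warning
});
```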
@@ -4824,7 +4824,7 @@
     if (isPipelinePrepared(pipeline)) {
         preparedPipeline = pipeline;
     }
-    else if (
+    else if (isNotPreparedWarningSuppressed !== true) {
         console.warn(spaceTrim.spaceTrim((block) => `
             Pipeline is not prepared
 
@@ -4857,7 +4857,7 @@
         maxParallelCount,
         csvSettings,
         isVerbose,
-
+        isNotPreparedWarningSuppressed,
         rootDirname,
         cacheDirname,
         intermediateFilesStrategy,
@@ -4866,7 +4866,7 @@
         assertsError(error);
         return exportJson({
             name: 'pipelineExecutorResult',
-            message: `
+            message: `Unsuccessful PipelineExecutorResult, last catch`,
             order: [],
             value: {
                 isSuccessful: false,
@@ -4936,12 +4936,12 @@
         get title() {
             return `${llmTools.title} (+usage)`;
             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-            // <- TODO: [🧈][🧠] Does it make
+            // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
         },
         get description() {
             return `${llmTools.description} (+usage)`;
             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-            // <- TODO: [🧈][🧠] Does it make
+            // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
         },
         checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
@@ -5228,17 +5228,17 @@
  * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
  */
 const all = [];
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
     if (all.some((item) => item.packageName === packageName && item.className === className)) {
         continue;
     }
-    all.push({ packageName, className, mimeTypes, documentationUrl,
+    all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
     if (all.some((item) => item.packageName === packageName && item.className === className)) {
         continue;
     }
-    all.push({ packageName, className, mimeTypes, documentationUrl,
+    all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
 for (const { metadata } of availableScrapers) {
     all.push(metadata);
@@ -5250,8 +5250,8 @@
         const isInstalled = $scrapersRegister
             .list()
             .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
-        const
-        return { ...metadata, isMetadataAviailable, isInstalled,
+        const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+        return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
     });
     if (metadata.length === 0) {
         return spaceTrim__default["default"](`
@@ -5264,7 +5264,7 @@
     return spaceTrim__default["default"]((block) => `
         Available scrapers are:
         ${block(metadata
-        .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+        .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
         const more = [];
         // TODO: [🧠] Maybe use `documentationUrl`
         if (isMetadataAviailable) {
@@ -5273,16 +5273,16 @@
         if (isInstalled) {
             more.push(`🟩 Installed`);
         } // not else
-        if (
+        if (isAvailableInTools) {
             more.push(`🟦 Available in tools`);
         } // not else
         if (!isMetadataAviailable && isInstalled) {
             more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
         } // not else
-        if (!isInstalled &&
+        if (!isInstalled && isAvailableInTools) {
             more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
         } // not else
-        if (!
+        if (!isAvailableInBrowser) {
             more.push(`Not usable in browser`);
         }
         const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -5748,7 +5748,7 @@
 /**
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
- * TODO: [🪂] More than max things can run in parallel by
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
  * TODO: [🧠][❎] Do here proper M:N mapping
  * [x] One source can make multiple pieces
  * [ ] One piece can have multiple sources
@@ -6636,7 +6636,7 @@
             $taskJson.expectations[unit] = $taskJson.expectations[unit] || {};
             if (command.sign === 'MINIMUM' || command.sign === 'EXACTLY') {
                 if ($taskJson.expectations[unit].min !== undefined) {
-                    throw new ParseError(`Already defined
+                    throw new ParseError(`Already defined minimum ${$taskJson.expectations[unit].min} ${command.unit.toLowerCase()}, now trying to redefine it to ${command.amount}`);
                 }
                 $taskJson.expectations[unit].min = command.amount;
             } /* not else */
@@ -10889,7 +10889,7 @@
     return fileNames;
 }
 /**
- * TODO: [😶] Unite
+ * TODO: [😶] Unite folder listing
  * Note: Not [~🟢~] because it is not directly dependent on `fs
  * TODO: [🖇] What about symlinks?
  */
@@ -11036,7 +11036,7 @@
         if (isCrashedOnError) {
             throw new CollectionError(wrappedErrorMessage);
         }
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.error(wrappedErrorMessage);
     }
 }
@@ -11103,7 +11103,7 @@
 
             Note: You have probably forgotten to run "ptbk make" to update the collection
             Note: Pipelines with the same URL are not allowed
-            Only
+            Only exception is when the pipelines are identical
 
         `));
     }
@@ -11127,7 +11127,7 @@
         if (isCrashedOnError) {
             throw new CollectionError(wrappedErrorMessage);
         }
-        // TODO: [🟥] Detect browser / node and make it
+        // TODO: [🟥] Detect browser / node and make it colorful
         console.error(wrappedErrorMessage);
     }
 }