@promptbook/legacy-documents 0.81.0-19 → 0.81.0-21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/README.md +39 -3
  2. package/esm/index.es.js +66 -15
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +38 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  6. package/esm/typings/src/config.d.ts +1 -1
  7. package/esm/typings/src/conversion/compilePipeline.d.ts +1 -4
  8. package/esm/typings/src/conversion/{precompilePipeline.d.ts → parsePipeline.d.ts} +2 -2
  9. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +1 -1
  10. package/esm/typings/src/high-level-abstractions/index.d.ts +1 -1
  11. package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
  12. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +2 -0
  13. package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
  14. package/esm/typings/src/scrapers/_common/Converter.d.ts +1 -0
  15. package/esm/typings/src/scrapers/_common/Scraper.d.ts +1 -1
  16. package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +3 -0
  17. package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +2 -0
  18. package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +1 -0
  19. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
  20. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -1
  21. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
  22. package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -1
  23. package/esm/typings/src/utils/markdown/flattenMarkdown.d.ts +1 -1
  24. package/esm/typings/src/utils/organization/$sideEffect.d.ts +9 -0
  25. package/esm/typings/src/wizzard/wizzard.d.ts +23 -11
  26. package/package.json +2 -2
  27. package/umd/index.umd.js +66 -15
  28. package/umd/index.umd.js.map +1 -1
  29. /package/esm/typings/src/conversion/{precompilePipeline.test.d.ts → parsePipeline.test.d.ts} +0 -0
@@ -89,5 +89,43 @@ declare const _default: ({
  content: string;
  }[];
  sourceFile: string;
+ } | {
+ title: string;
+ pipelineUrl: string;
+ formfactorName: string;
+ parameters: {
+ name: string;
+ description: string;
+ isInput: boolean;
+ isOutput: boolean;
+ }[];
+ tasks: {
+ taskType: string;
+ name: string;
+ title: string;
+ content: string;
+ resultingParameterName: string;
+ expectations: {
+ words: {
+ min: number;
+ max: number;
+ };
+ lines: {
+ min: number;
+ max: number;
+ };
+ };
+ dependentParameterNames: string[];
+ }[];
+ personas: never[];
+ preparations: never[];
+ knowledgeSources: never[];
+ knowledgePieces: never[];
+ sources: {
+ type: string;
+ path: null;
+ content: string;
+ }[];
+ sourceFile: string;
  })[];
  export default _default;
@@ -10,7 +10,7 @@ import { ADMIN_GITHUB_NAME } from '../config';
  import { CLAIM } from '../config';
  import { LOGO_LIGHT_SRC } from '../config';
  import { LOGO_DARK_SRC } from '../config';
- import { DEFAULT_TITLE } from '../config';
+ import { DEFAULT_BOOK_TITLE } from '../config';
  import { MAX_FILENAME_LENGTH } from '../config';
  import { DEFAULT_INTERMEDIATE_FILES_STRATEGY } from '../config';
  import { DEFAULT_MAX_PARALLEL_COUNT } from '../config';
@@ -31,8 +31,8 @@ import { DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME } from '../config';
  import { ORDER_OF_PIPELINE_JSON } from '../constants';
  import { RESERVED_PARAMETER_NAMES } from '../constants';
  import { compilePipeline } from '../conversion/compilePipeline';
+ import { parsePipeline } from '../conversion/parsePipeline';
  import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
- import { precompilePipeline } from '../conversion/precompilePipeline';
  import { prettifyPipelineString } from '../conversion/prettify/prettifyPipelineString';
  import { extractParameterNamesFromTask } from '../conversion/utils/extractParameterNamesFromTask';
  import { validatePipeline } from '../conversion/validation/validatePipeline';
@@ -132,7 +132,7 @@ export { ADMIN_GITHUB_NAME };
  export { CLAIM };
  export { LOGO_LIGHT_SRC };
  export { LOGO_DARK_SRC };
- export { DEFAULT_TITLE };
+ export { DEFAULT_BOOK_TITLE };
  export { MAX_FILENAME_LENGTH };
  export { DEFAULT_INTERMEDIATE_FILES_STRATEGY };
  export { DEFAULT_MAX_PARALLEL_COUNT };
@@ -153,8 +153,8 @@ export { DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME };
  export { ORDER_OF_PIPELINE_JSON };
  export { RESERVED_PARAMETER_NAMES };
  export { compilePipeline };
+ export { parsePipeline };
  export { pipelineJsonToString };
- export { precompilePipeline };
  export { prettifyPipelineString };
  export { extractParameterNamesFromTask };
  export { validatePipeline };
@@ -58,7 +58,7 @@ export declare const LOGO_DARK_SRC: string_url_image;
  *
  * @public exported from `@promptbook/core`
  */
- export declare const DEFAULT_TITLE = "Untitled";
+ export declare const DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  /**
  * Warning message for the generated sections and files files
  *
@@ -5,10 +5,7 @@ import type { PrepareAndScrapeOptions } from '../prepare/PrepareAndScrapeOptions
  /**
  * Compile pipeline from string (markdown) format to JSON format
  *
- * Note: There are 3 similar functions:
- * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `precompilePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
- * - `preparePipeline` - just one step in the compilation process
+ * @see https://github.com/webgptorg/promptbook/discussions/196
  *
  * Note: This function does not validate logic of the pipeline only the parsing
  * Note: This function acts as compilation process
@@ -5,7 +5,7 @@ import type { PipelineString } from '../pipeline/PipelineString';
  *
  * Note: There are 3 similar functions:
  * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `precompilePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
+ * - `parsePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
  * - `preparePipeline` - just one step in the compilation process
  *
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -16,7 +16,7 @@ import type { PipelineString } from '../pipeline/PipelineString';
  * @throws {ParseError} if the promptbook string is not valid
  * @public exported from `@promptbook/core`
  */
- export declare function precompilePipeline(pipelineString: PipelineString): PipelineJson;
+ export declare function parsePipeline(pipelineString: PipelineString): PipelineJson;
  /**
  * TODO: [🧠] Maybe more things here can be refactored as high-level abstractions
  * TODO: [main] !!!! Warn if used only sync version
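For orientation on the rename above, a minimal sketch of calling the synchronous parser. It assumes `parsePipeline` is imported from `@promptbook/core` (confirmed by the index changes above), that the `PipelineString` type is re-exported from the same entry point (an assumption), and the book fragment itself is purely illustrative:

```typescript
import { parsePipeline } from '@promptbook/core';
import type { PipelineString } from '@promptbook/core'; // <- assumption: the type is re-exported here

// Illustrative book fragment; a real book would also define its tasks
const source = `
# ✨ Example Book

- PIPELINE URL https://promptbook.studio/example/example.book.md
- OUTPUT PARAMETER \`{greeting}\`
` as PipelineString; // <- see the TODO below about preferring the book\`...\` notation over casting

// Synchronous string -> JSON parse; throws ParseError when the string is not a valid promptbook
const pipelineJson = parsePipeline(source);
console.info(pipelineJson.title);
```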
@@ -1,6 +1,6 @@
  import type { $PipelineJson } from '../../commands/_common/types/CommandParser';
  /**
- * Used in `precompilePipeline`
+ * Used in `parsePipeline`
  *
  * @private
  */
@@ -1,7 +1,7 @@
  /**
  * All high-level abstractions
  *
- * @private internal index of `precompilePipeline` (= used for sync) and `preparePipeline` (= used for async)
+ * @private internal index of `parsePipeline` (= used for sync) and `preparePipeline` (= used for async)
  */
  export declare const HIGH_LEVEL_ABSTRACTIONS: readonly [{
  type: "SYNC";
@@ -1,6 +1,6 @@
  import type { PipelineString } from './PipelineString';
  /**
- * Function for notating a pipeline with a book\`...\ notation as template literal
+ * Tag function for notating a pipeline with a book\`...\ notation as template literal
  *
  * @param strings @@@
  * @param values @@@
@@ -9,7 +9,7 @@ import type { PipelineString } from './PipelineString';
  */
  export declare function book(strings: TemplateStringsArray, ...values: Array<string>): PipelineString;
  /**
- * TODO: !!!!!! Use book\`...\ notation instead of as PipelineString
+ * TODO: Use ACRY book\`...\ notation instead of as PipelineString
  * TODO: [🧠][🈴] Where is the best location for this file
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
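A minimal sketch of the tag function declared above; it returns a `PipelineString` directly, which is what the TODO about preferring the book notation over `as PipelineString` casts refers to. Which package re-exports `book` is not shown in this diff, so the import path is an assumption:

```typescript
import { book } from '@promptbook/core'; // <- assumption: the actual entry point re-exporting `book` is not shown in this diff

// The tag function returns a `PipelineString`, so no `as PipelineString` cast is needed
const exampleBook = book`
    # ✨ Example Book

    - PIPELINE URL https://promptbook.studio/example/example.book.md
    - INPUT PARAMETER \`{name}\` Who to greet
    - OUTPUT PARAMETER \`{greeting}\`
`;
```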
@@ -2,6 +2,8 @@ import type { PipelineJson } from '../pipeline/PipelineJson/PipelineJson';
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
@@ -4,6 +4,8 @@ import type { PrepareAndScrapeOptions } from './PrepareAndScrapeOptions';
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
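Both functions now link to discussion 196. A sketch of how they compose, assuming both are importable from `@promptbook/core` (the JSDoc above confirms this for `isPipelinePrepared`; for `preparePipeline` it is an assumption) and assuming the `preparePipeline(pipeline, tools, options)` shape visible in the bundled code further down:

```typescript
import { isPipelinePrepared, preparePipeline } from '@promptbook/core'; // <- assumption for `preparePipeline`
import type { ExecutionTools, PipelineJson } from '@promptbook/core'; // <- assumption: type re-exports

async function ensurePrepared(pipeline: PipelineJson, tools: ExecutionTools): Promise<PipelineJson> {
    if (isPipelinePrepared(pipeline)) {
        // Personas, knowledge and (as of this version) the title are already resolved
        return pipeline;
    }

    // Requires `tools.llm`, otherwise MissingToolsError is thrown (see the bundled code below)
    return preparePipeline(pipeline, tools, { isVerbose: true });
}
```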
@@ -4,6 +4,7 @@ import type { ScraperIntermediateSource } from './ScraperIntermediateSource';
  /**
  * @@@
  *
+ * Note: [🌏] Converters are not usable in browser because they produce a files
  */
  export type Converter = {
  /**
@@ -7,7 +7,6 @@ import type { string_url } from '../../types/typeAliases';
  import type { ScraperAndConverterMetadata } from './register/ScraperAndConverterMetadata';
  /**
  * @@@
- *
  */
  export type Scraper = {
  /**
@@ -53,6 +52,7 @@ export type ScraperSourceHandler = {
  asText(): Promisable<string>;
  };
  /**
+ * TODO: [🧠] Maybe split `ScraperSourceHandler` into `ScraperWebsiteSourceHandler` + `ScraperFileSourceHandler`
  * TODO: [🥽] Add ` asBlob(): Promisable<Blob>;` or asFile
  * TODO: [🐝] @@@ Annotate all
  * TODO: [🔼] Export via types
@@ -9,3 +9,6 @@ export type ScraperIntermediateSource = IDestroyable & {
  */
  readonly filename: string_absolute_filename;
  };
+ /**
+ * Note: [🌏] Converters can be used only in node because they uses `ScraperIntermediateSource` which uses file system
+ */
@@ -28,6 +28,8 @@ export type ScraperAndConverterMetadata = Registered & {
  readonly mimeTypes: ReadonlyArray<string_mime_type>;
  /**
  * @@@
+ *
+ * Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  */
  readonly isAvilableInBrowser: boolean;
  /**
@@ -37,4 +37,5 @@ export declare class PdfScraper implements Converter, Scraper {
  * TODO: [👣] Converted pdf documents can act as cached items - there is no need to run conversion each time
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
@@ -12,7 +12,7 @@ export declare const createPdfScraper: ((tools: Pick<ExecutionTools, 'llm'>, opt
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser: true;
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -10,7 +10,7 @@ export declare const pdfScraperMetadata: import("type-fest/source/readonly-deep"
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser: true;
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -12,7 +12,7 @@ export declare const createWebsiteScraper: ((tools: Pick<ExecutionTools, 'llm'>,
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser: true; /**
+ isAvilableInBrowser: false; /**
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
  requiredExecutables: never[];
@@ -10,7 +10,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser: true;
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -2,7 +2,7 @@ import type { string_markdown } from '../../types/typeAliases';
  /**
  * Normalizes the markdown by flattening the structure
  *
- * - It always have h1 - if there is no h1 in the markdown, it will be added "# Untitled"
+ * - It always have h1 - if there is no h1 in the markdown, it will be added `DEFAULT_BOOK_TITLE`
  * - All other headings are normalized to h2
  *
  * @public exported from `@promptbook/markdown-utils`
@@ -0,0 +1,9 @@
+ import type { really_any } from './really_any';
+ /**
+ * Just says that the variable is not used directlys but should be kept because the existence of the variable is important
+ *
+ * @param value any values
+ * @returns void
+ * @private within the repository
+ */
+ export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<really_any>): void;
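`$sideEffect` is a new private helper whose only purpose is to reference values that must be kept even though nothing reads them. A sketch of the intended in-repository usage; the imported constant is hypothetical:

```typescript
import { $sideEffect } from '../utils/organization/$sideEffect';
import { SOME_REGISTRATION } from './register-constructor'; // <- hypothetical import that matters only for its side effect

// Tell readers, linters and bundlers that the value is intentionally kept although it is never read
$sideEffect(SOME_REGISTRATION);
```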
@@ -8,36 +8,50 @@ import type { InputParameters } from '../types/typeAliases';
  import type { string_filename } from '../types/typeAliases';
  import type { string_pipeline_url } from '../types/typeAliases';
  /**
+ * Wizzard for simple usage of the Promptbook
  * Look at `wizzard` for more details
  *
+ * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
+ *
  * @private just for single instance
  */
  declare class Wizzard {
  /**
- * @@@!!!!!!
+ * Run the book
+ *
+ * It can be loaded from:
+ * 1) As a file ./books/write-cv.book.md
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 3) As a string
+ *
+ * Note: This works simmilar to the `ptbk run` command
  */
- execute(book: string_pipeline_url, inputParameters: InputParameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
+ execute(book: string_pipeline_url | string_filename | PipelineString, inputParameters: InputParameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
  private executionTools;
  /**
- * @@@!!!
+ * Provides the tools automatically for the Node.js environment
  *
  * @param pipelineSource
  */
  getExecutionTools(): Promise<Required<Pick<ExecutionTools, 'fs' | 'fetch'>>>;
  /**
- * TODO: Make standalone function from this exported from node and used here and in `ptbk run`
- * @@@!!!
+ * Load book from the source
  *
- * Strategies:
- * 1) @@@!!!
- * 2) @@@!!!
+ * Pipelines can be loaded from:
+ * 1) As a file ./books/write-cv.book.md
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 3) As a string
  *
  * @param pipelineSource
  */
  getCompiledBook(pipelineSource: string_filename | string_pipeline_url | PipelineString): Promise<PipelineJson>;
  }
  /**
- * 🧙‍♂️ @@@
+ * Wizzard for simple usage of the Promptbook
+ *
+ * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
  *
  * @singleton
  * @public exported from `@promptbook/wizzard`
@@ -45,7 +59,5 @@ declare class Wizzard {
  export declare const wizzard: Wizzard;
  export {};
  /**
- * TODO: !!!!!! Mark in jsdoc as non-pure
- * TODO: !!!!!! Add to readmes - one markdown here imported in all packages
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
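The reworked `execute` signature above now accepts a pipeline URL, a file path or a raw `PipelineString`. A minimal Node.js usage sketch against the `wizzard` singleton; the input parameter name and value are illustrative:

```typescript
import { wizzard } from '@promptbook/wizzard';

// Node.js only - the wizzard discovers configuration, tools and cache from the environment
const result = await wizzard.execute(
    'https://promptbook.studio/hejny/write-cv.book.md',
    { yourName: 'Paul' }, // <- illustrative input parameters
    (taskProgress) => console.info(taskProgress),
);

console.info(result.outputParameters);
```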
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/legacy-documents",
- "version": "0.81.0-19",
+ "version": "0.81.0-21",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "--note-0": " <- [🐊]",
  "private": false,
@@ -54,7 +54,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.81.0-19"
+ "@promptbook/core": "0.81.0-21"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-18';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-20';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -190,6 +190,12 @@
  * @public exported from `@promptbook/core`
  */
  var ADMIN_GITHUB_NAME = 'hejny';
+ /**
+ * When the title is not provided, the default title is used
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -1108,7 +1114,7 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
 
  /**
  * Prettify the html code
@@ -2513,11 +2519,16 @@
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
+ if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+ return false;
+ }
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
  return false;
  }
@@ -3838,7 +3849,7 @@
  partialPieces = __spreadArray([], __read(partialPiecesUnchecked), false);
  return [2 /*return*/, "break"];
  }
- console.warn(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n \n The source:\n > ").concat(block(knowledgeSource.sourceContent
+ console.warn(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n\n The source:\n ").concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3876,7 +3887,7 @@
  return [7 /*endfinally*/];
  case 9:
  if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge\n \n The source:\n > ".concat(block(knowledgeSource.sourceContent
+ throw new KnowledgeScrapeError(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge\n\n The source:\n > ".concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n No scraper found for the mime type \"").concat(sourceHandler.mimeType, "\"\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3977,6 +3988,8 @@
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -3989,16 +4002,17 @@
  <- TODO: [🧠][🪑] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, sources, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, title, collection, prepareTitleExecutor, _c, result, outputParameters, titleRaw, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ var _d;
  var _this = this;
- return __generator(this, function (_c) {
- switch (_c.label) {
+ return __generator(this, function (_e) {
+ switch (_e.label) {
  case 0:
  if (isPipelinePrepared(pipeline)) {
  return [2 /*return*/, pipeline];
  }
  rootDirname = options.rootDirname, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? DEFAULT_IS_VERBOSE : _b;
- parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas, sources = pipeline.sources;
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
@@ -4016,6 +4030,40 @@
  // <- TODO: [🧊]
  currentPreparation,
  ];
+ title = pipeline.title;
+ if (!(title === undefined || title === '' || title === DEFAULT_BOOK_TITLE)) return [3 /*break*/, 3];
+ collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
+ _c = createPipelineExecutor;
+ _d = {};
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+ case 1:
+ prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
+ _d.tools = tools,
+ _d)]);
+ return [4 /*yield*/, prepareTitleExecutor({
+ book: sources
+ .map(function (_a) {
+ var content = _a.content;
+ return content;
+ })
+ .join('\n\n')
+ // TODO: !!!!!!! Parameters in parameters - DO NOT ALLOW, ESCAPE:
+ .split('{')
+ .join('[')
+ .split('}')
+ .join(']'),
+ })];
+ case 2:
+ result = _e.sent();
+ assertsExecutionSuccessful(result);
+ outputParameters = result.outputParameters;
+ titleRaw = outputParameters.title;
+ if (isVerbose) {
+ console.info("The title is \"".concat(titleRaw, "\""));
+ }
+ title = titleRaw || DEFAULT_BOOK_TITLE;
+ _e.label = 3;
+ case 3:
  preparedPersonas = new Array(personas.length);
  return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
  var modelRequirements, preparedPersona;
@@ -4034,12 +4082,12 @@
  }
  });
  }); })];
- case 1:
- _c.sent();
+ case 4:
+ _e.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
- case 2:
- partialknowledgePiecesPrepared = _c.sent();
+ case 5:
+ partialknowledgePiecesPrepared = _e.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTasks({
  parameters: parameters,
@@ -4050,8 +4098,8 @@
  maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
  isVerbose: isVerbose,
  })];
- case 3:
- tasksPrepared = (_c.sent()).tasksPrepared;
+ case 6:
+ tasksPrepared = (_e.sent()).tasksPrepared;
  // ----- /Tasks preparation -----
  // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
  // Note: Count total usage
@@ -4062,7 +4110,7 @@
  order: ORDER_OF_PIPELINE_JSON,
  value: __assign(__assign({}, pipeline), {
  // <- TODO: Probbably deeply clone the pipeline because `$exportJson` freezes the subobjects
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
+ title: title, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
  // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
  personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
  })];
@@ -6117,6 +6165,7 @@
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: true,
+ // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
@@ -6315,6 +6364,7 @@
  mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: false,
+ // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: ['Pandoc'],
  }); /* <- Note: [🤛] */
  /**
@@ -6479,6 +6529,7 @@
  mimeTypes: ['application/msword', 'text/rtf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: false,
+ // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [
  'Pandoc',
  'LibreOffice',