@promptbook/pdf 0.81.0-19 → 0.81.0-21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +39 -3
- package/esm/index.es.js +67 -16
- package/esm/index.es.js.map +1 -1
- package/esm/typings/books/index.d.ts +38 -0
- package/esm/typings/src/_packages/core.index.d.ts +4 -4
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/conversion/compilePipeline.d.ts +1 -4
- package/esm/typings/src/conversion/{precompilePipeline.d.ts → parsePipeline.d.ts} +2 -2
- package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +1 -1
- package/esm/typings/src/high-level-abstractions/index.d.ts +1 -1
- package/esm/typings/src/pipeline/book-notation.d.ts +2 -2
- package/esm/typings/src/prepare/isPipelinePrepared.d.ts +2 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
- package/esm/typings/src/scrapers/_common/Converter.d.ts +1 -0
- package/esm/typings/src/scrapers/_common/Scraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/ScraperIntermediateSource.d.ts +3 -0
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +2 -0
- package/esm/typings/src/scrapers/pdf/PdfScraper.d.ts +1 -0
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -1
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -1
- package/esm/typings/src/utils/markdown/flattenMarkdown.d.ts +1 -1
- package/esm/typings/src/utils/organization/$sideEffect.d.ts +9 -0
- package/esm/typings/src/wizzard/wizzard.d.ts +23 -11
- package/package.json +2 -2
- package/umd/index.umd.js +67 -16
- package/umd/index.umd.js.map +1 -1
- /package/esm/typings/src/conversion/{precompilePipeline.test.d.ts → parsePipeline.test.d.ts} +0 -0
@@ -89,5 +89,43 @@ declare const _default: ({
  content: string;
  }[];
  sourceFile: string;
+ } | {
+ title: string;
+ pipelineUrl: string;
+ formfactorName: string;
+ parameters: {
+ name: string;
+ description: string;
+ isInput: boolean;
+ isOutput: boolean;
+ }[];
+ tasks: {
+ taskType: string;
+ name: string;
+ title: string;
+ content: string;
+ resultingParameterName: string;
+ expectations: {
+ words: {
+ min: number;
+ max: number;
+ };
+ lines: {
+ min: number;
+ max: number;
+ };
+ };
+ dependentParameterNames: string[];
+ }[];
+ personas: never[];
+ preparations: never[];
+ knowledgeSources: never[];
+ knowledgePieces: never[];
+ sources: {
+ type: string;
+ path: null;
+ content: string;
+ }[];
+ sourceFile: string;
  })[];
  export default _default;
@@ -10,7 +10,7 @@ import { ADMIN_GITHUB_NAME } from '../config';
  import { CLAIM } from '../config';
  import { LOGO_LIGHT_SRC } from '../config';
  import { LOGO_DARK_SRC } from '../config';
- import {
+ import { DEFAULT_BOOK_TITLE } from '../config';
  import { MAX_FILENAME_LENGTH } from '../config';
  import { DEFAULT_INTERMEDIATE_FILES_STRATEGY } from '../config';
  import { DEFAULT_MAX_PARALLEL_COUNT } from '../config';
@@ -31,8 +31,8 @@ import { DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME } from '../config';
  import { ORDER_OF_PIPELINE_JSON } from '../constants';
  import { RESERVED_PARAMETER_NAMES } from '../constants';
  import { compilePipeline } from '../conversion/compilePipeline';
+ import { parsePipeline } from '../conversion/parsePipeline';
  import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
- import { precompilePipeline } from '../conversion/precompilePipeline';
  import { prettifyPipelineString } from '../conversion/prettify/prettifyPipelineString';
  import { extractParameterNamesFromTask } from '../conversion/utils/extractParameterNamesFromTask';
  import { validatePipeline } from '../conversion/validation/validatePipeline';
@@ -132,7 +132,7 @@ export { ADMIN_GITHUB_NAME };
  export { CLAIM };
  export { LOGO_LIGHT_SRC };
  export { LOGO_DARK_SRC };
- export {
+ export { DEFAULT_BOOK_TITLE };
  export { MAX_FILENAME_LENGTH };
  export { DEFAULT_INTERMEDIATE_FILES_STRATEGY };
  export { DEFAULT_MAX_PARALLEL_COUNT };
@@ -153,8 +153,8 @@ export { DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME };
  export { ORDER_OF_PIPELINE_JSON };
  export { RESERVED_PARAMETER_NAMES };
  export { compilePipeline };
+ export { parsePipeline };
  export { pipelineJsonToString };
- export { precompilePipeline };
  export { prettifyPipelineString };
  export { extractParameterNamesFromTask };
  export { validatePipeline };
@@ -58,7 +58,7 @@ export declare const LOGO_DARK_SRC: string_url_image;
  *
  * @public exported from `@promptbook/core`
  */
- export declare const
+ export declare const DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  /**
  * Warning message for the generated sections and files files
  *
@@ -5,10 +5,7 @@ import type { PrepareAndScrapeOptions } from '../prepare/PrepareAndScrapeOptions
  /**
  * Compile pipeline from string (markdown) format to JSON format
  *
- *
- * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `precompilePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
- * - `preparePipeline` - just one step in the compilation process
+ * @see https://github.com/webgptorg/promptbook/discussions/196
  *
  * Note: This function does not validate logic of the pipeline only the parsing
  * Note: This function acts as compilation process
@@ -5,7 +5,7 @@ import type { PipelineString } from '../pipeline/PipelineString';
  *
  * Note: There are 3 similar functions:
  * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `
+ * - `parsePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
  * - `preparePipeline` - just one step in the compilation process
  *
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -16,7 +16,7 @@ import type { PipelineString } from '../pipeline/PipelineString';
  * @throws {ParseError} if the promptbook string is not valid
  * @public exported from `@promptbook/core`
  */
- export declare function
+ export declare function parsePipeline(pipelineString: PipelineString): PipelineJson;
  /**
  * TODO: [🧠] Maybe more things here can be refactored as high-level abstractions
  * TODO: [main] !!!! Warn if used only sync version
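The renamed `parsePipeline` replaces `precompilePipeline` as the synchronous string-to-JSON step. A minimal sketch of how the two entry points relate, per the doc comments above (the inline book source, the `as PipelineString` cast and the import paths are assumptions for illustration):

```ts
import { compilePipeline, parsePipeline } from '@promptbook/core';
import type { PipelineString } from '@promptbook/types'; // import path assumed

// Illustrative pipeline source with no external knowledge
const source = `
# Example

-   OUTPUT PARAMETER \`{greeting}\`
` as PipelineString;

// Synchronous parsing - only safe when the book needs no scraping or embeddings
const pipelineJson = parsePipeline(source);

// Preferred path: asynchronous compilation, which also runs the prepare step
// (knowledge, personas and - as of this release - the generated title)
// const prepared = await compilePipeline(source, tools);
```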
@@ -1,7 +1,7 @@
  /**
  * All high-level abstractions
  *
- * @private internal index of `
+ * @private internal index of `parsePipeline` (= used for sync) and `preparePipeline` (= used for async)
  */
  export declare const HIGH_LEVEL_ABSTRACTIONS: readonly [{
  type: "SYNC";
@@ -1,6 +1,6 @@
  import type { PipelineString } from './PipelineString';
  /**
- *
+ * Tag function for notating a pipeline with a book\`...\` notation as template literal
  *
  * @param strings @@@
  * @param values @@@
@@ -9,7 +9,7 @@ import type { PipelineString } from './PipelineString';
  */
  export declare function book(strings: TemplateStringsArray, ...values: Array<string>): PipelineString;
  /**
- * TODO:
+ * TODO: Use ACRY book\`...\` notation instead of as PipelineString
  * TODO: [🧠][🈴] Where is the best location for this file
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
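The `book` tag above returns a `PipelineString` (a branded string) rather than a plain `string`, which is what would make the `as PipelineString` casts mentioned in the TODO unnecessary. A small usage sketch (the book content and the import path are illustrative assumptions):

```ts
import { book } from '@promptbook/core'; // import path assumed

// Tagged template: the result is typed as PipelineString, not as a plain string,
// so APIs that expect a properly-notated pipeline can enforce it at compile time.
const myBook = book`
    # My Example Book

    -   OUTPUT PARAMETER \`{answer}\`

    ## Answer

    > Reply with a single friendly sentence.

    \`-> {answer}\`
`;
```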
@@ -2,6 +2,8 @@ import type { PipelineJson } from '../pipeline/PipelineJson/PipelineJson';
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
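`isPipelinePrepared` is the guard that `preparePipeline` uses to decide whether any preparation work remains; the UMD changes further below extend it to also require a real title. A tiny usage sketch (import paths assumed):

```ts
import { isPipelinePrepared } from '@promptbook/core';
import type { PipelineJson } from '@promptbook/types'; // import path assumed

declare const pipeline: PipelineJson; // some already-compiled pipeline

if (!isPipelinePrepared(pipeline)) {
    // Something is still missing - persona model requirements, or
    // (as of this release) a title other than DEFAULT_BOOK_TITLE.
    console.warn('Run preparePipeline() before executing this pipeline');
}
```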
@@ -4,6 +4,8 @@ import type { PrepareAndScrapeOptions } from './PrepareAndScrapeOptions';
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -7,7 +7,6 @@ import type { string_url } from '../../types/typeAliases';
  import type { ScraperAndConverterMetadata } from './register/ScraperAndConverterMetadata';
  /**
  * @@@
- *
  */
  export type Scraper = {
  /**
@@ -53,6 +52,7 @@ export type ScraperSourceHandler = {
  asText(): Promisable<string>;
  };
  /**
+ * TODO: [🧠] Maybe split `ScraperSourceHandler` into `ScraperWebsiteSourceHandler` + `ScraperFileSourceHandler`
  * TODO: [🥽] Add ` asBlob(): Promisable<Blob>;` or asFile
  * TODO: [🐝] @@@ Annotate all
  * TODO: [🔼] Export via types
@@ -28,6 +28,8 @@ export type ScraperAndConverterMetadata = Registered & {
  readonly mimeTypes: ReadonlyArray<string_mime_type>;
  /**
  * @@@
+ *
+ * Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  */
  readonly isAvilableInBrowser: boolean;
  /**
@@ -37,4 +37,5 @@ export declare class PdfScraper implements Converter, Scraper {
  * TODO: [👣] Converted pdf documents can act as cached items - there is no need to run conversion each time
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
@@ -12,7 +12,7 @@ export declare const createPdfScraper: ((tools: Pick<ExecutionTools, 'llm'>, opt
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser:
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -10,7 +10,7 @@ export declare const pdfScraperMetadata: import("type-fest/source/readonly-deep"
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser:
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -12,7 +12,7 @@ export declare const createWebsiteScraper: ((tools: Pick<ExecutionTools, 'llm'>,
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser:
+ isAvilableInBrowser: false; /**
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
  requiredExecutables: never[];
@@ -10,7 +10,7 @@ export declare const websiteScraperMetadata: import("type-fest/source/readonly-d
  className: string;
  mimeTypes: string[];
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/@@";
- isAvilableInBrowser:
+ isAvilableInBrowser: false;
  requiredExecutables: never[];
  }>;
  /**
@@ -2,7 +2,7 @@ import type { string_markdown } from '../../types/typeAliases';
  /**
  * Normalizes the markdown by flattening the structure
  *
- * - It always have h1 - if there is no h1 in the markdown, it will be added
+ * - It always have h1 - if there is no h1 in the markdown, it will be added `DEFAULT_BOOK_TITLE`
  * - All other headings are normalized to h2
  *
  * @public exported from `@promptbook/markdown-utils`
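Per the changed bullet above, a document with no h1 now gets `DEFAULT_BOOK_TITLE` ("✨ Untitled Book") prepended when it is flattened. A rough behavioural sketch (import confirmed by the `@public` tag above; exact output formatting is not guaranteed):

```ts
import { flattenMarkdown } from '@promptbook/markdown-utils';

// Markdown without an h1 heading:
const flattened = flattenMarkdown(`Some introductory paragraph.\n\n## A section`);

// Expected shape: an h1 is prepended (here the default "✨ Untitled Book")
// and every other heading is normalized to h2.
console.info(flattened);
```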
@@ -0,0 +1,9 @@
+ import type { really_any } from './really_any';
+ /**
+ * Just says that the variable is not used directlys but should be kept because the existence of the variable is important
+ *
+ * @param value any values
+ * @returns void
+ * @private within the repository
+ */
+ export declare function $sideEffect(...sideEffectSubjects: ReadonlyArray<really_any>): void;
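`$sideEffect` is a repository-private no-op whose only job is to reference values that must stay in the build even though nothing reads them directly. A sketch of the intended call pattern (the imported value is purely illustrative):

```ts
import { $sideEffect } from '../utils/organization/$sideEffect'; // private helper, path per the typings above
import { PROMPTBOOK_ENGINE_VERSION } from '../version'; // illustrative value kept only for its presence

// No-op at runtime; the call documents that keeping this reference is intentional
// and protects it from "unused variable" lint rules and accidental removal.
$sideEffect(PROMPTBOOK_ENGINE_VERSION);
```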
@@ -8,36 +8,50 @@ import type { InputParameters } from '../types/typeAliases';
  import type { string_filename } from '../types/typeAliases';
  import type { string_pipeline_url } from '../types/typeAliases';
  /**
+ * Wizzard for simple usage of the Promptbook
  * Look at `wizzard` for more details
  *
+ * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
+ *
  * @private just for single instance
  */
  declare class Wizzard {
  /**
- *
+ * Run the book
+ *
+ * It can be loaded from:
+ * 1) As a file ./books/write-cv.book.md
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 3) As a string
+ *
+ * Note: This works simmilar to the `ptbk run` command
  */
- execute(book: string_pipeline_url, inputParameters: InputParameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
+ execute(book: string_pipeline_url | string_filename | PipelineString, inputParameters: InputParameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
  private executionTools;
  /**
- *
+ * Provides the tools automatically for the Node.js environment
  *
  * @param pipelineSource
  */
  getExecutionTools(): Promise<Required<Pick<ExecutionTools, 'fs' | 'fetch'>>>;
  /**
- *
- * @@@!!!
+ * Load book from the source
  *
- *
- * 1)
- * 2)
+ * Pipelines can be loaded from:
+ * 1) As a file ./books/write-cv.book.md
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 3) As a string
  *
  * @param pipelineSource
  */
  getCompiledBook(pipelineSource: string_filename | string_pipeline_url | PipelineString): Promise<PipelineJson>;
  }
  /**
- *
+ * Wizzard for simple usage of the Promptbook
+ *
+ * Note: This works only in Node.js environment and looks for the configuration, environment, tools and cache in the Node.js environment
  *
  * @singleton
  * @public exported from `@promptbook/wizzard`
@@ -45,7 +59,5 @@ declare class Wizzard
  export declare const wizzard: Wizzard;
  export {};
  /**
- * TODO: !!!!!! Mark in jsdoc as non-pure
- * TODO: !!!!!! Add to readmes - one markdown here imported in all packages
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
package/package.json (CHANGED)

@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/pdf",
- "version": "0.81.0-
+ "version": "0.81.0-21",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "--note-0": " <- [🐊]",
  "private": false,

@@ -54,7 +54,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/pdf.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.81.0-
+ "@promptbook/core": "0.81.0-21"
  },
  "dependencies": {
  "crypto-js": "4.2.0",
package/umd/index.umd.js (CHANGED)

@@ -24,7 +24,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-20';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -178,7 +178,7 @@
  function TODO_USE() {
  }

-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the task:\n\n> {book}\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Title starts with emoticon\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];

  /**
  * Prettify the html code
@@ -470,6 +470,12 @@
  * @public exported from `@promptbook/core`
  */
  var ADMIN_GITHUB_NAME = 'hejny';
+ /**
+ * When the title is not provided, the default title is used
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var DEFAULT_BOOK_TITLE = "\u2728 Untitled Book";
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -1816,11 +1822,16 @@
  /**
  * Determine if the pipeline is fully prepared
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * @public exported from `@promptbook/core`
  */
  function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
+ if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
+ return false;
+ }
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
  return false;
  }
@@ -3559,7 +3570,7 @@
  partialPieces = __spreadArray([], __read(partialPiecesUnchecked), false);
  return [2 /*return*/, "break"];
  }
- console.warn(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n
+ console.warn(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge from source despite the scraper `".concat(scraper.metadata.className, "` supports the mime type \"").concat(sourceHandler.mimeType, "\".\n\n The source:\n ").concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3597,7 +3608,7 @@
  return [7 /*endfinally*/];
  case 9:
  if (partialPieces === null) {
- throw new KnowledgeScrapeError(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge\n
+ throw new KnowledgeScrapeError(spaceTrim__default["default"](function (block) { return "\n Cannot scrape knowledge\n\n The source:\n > ".concat(block(knowledgeSource.sourceContent
  .split('\n')
  .map(function (line) { return "> ".concat(line); })
  .join('\n')), "\n\n No scraper found for the mime type \"").concat(sourceHandler.mimeType, "\"\n\n ").concat(block($registeredScrapersMessage(scrapers)), "\n\n\n "); }));
@@ -3698,6 +3709,8 @@
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
+ * @see https://github.com/webgptorg/promptbook/discussions/196
+ *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
  * Note: When the pipeline is already prepared, it returns the same pipeline
@@ -3710,16 +3723,17 @@
  <- TODO: [🧠][🪑] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, sources, _llms, llmTools, llmToolsWithUsage, currentPreparation, preparations, title, collection, prepareTitleExecutor, _c, result, outputParameters, titleRaw, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, tasksPrepared /* TODO: parameters: parametersPrepared*/;
+ var _d;
  var _this = this;
- return __generator(this, function (
- switch (
+ return __generator(this, function (_e) {
+ switch (_e.label) {
  case 0:
  if (isPipelinePrepared(pipeline)) {
  return [2 /*return*/, pipeline];
  }
  rootDirname = options.rootDirname, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? DEFAULT_MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? DEFAULT_IS_VERBOSE : _b;
- parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas, sources = pipeline.sources;
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
@@ -3737,6 +3751,40 @@
  // <- TODO: [🧊]
  currentPreparation,
  ];
+ title = pipeline.title;
+ if (!(title === undefined || title === '' || title === DEFAULT_BOOK_TITLE)) return [3 /*break*/, 3];
+ collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
+ _c = createPipelineExecutor;
+ _d = {};
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+ case 1:
+ prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
+ _d.tools = tools,
+ _d)]);
+ return [4 /*yield*/, prepareTitleExecutor({
+ book: sources
+ .map(function (_a) {
+ var content = _a.content;
+ return content;
+ })
+ .join('\n\n')
+ // TODO: !!!!!!! Parameters in parameters - DO NOT ALLOW, ESCAPE:
+ .split('{')
+ .join('[')
+ .split('}')
+ .join(']'),
+ })];
+ case 2:
+ result = _e.sent();
+ assertsExecutionSuccessful(result);
+ outputParameters = result.outputParameters;
+ titleRaw = outputParameters.title;
+ if (isVerbose) {
+ console.info("The title is \"".concat(titleRaw, "\""));
+ }
+ title = titleRaw || DEFAULT_BOOK_TITLE;
+ _e.label = 3;
+ case 3:
  preparedPersonas = new Array(personas.length);
  return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
  var modelRequirements, preparedPersona;
@@ -3755,12 +3803,12 @@
  }
  });
  }); })];
- case
-
+ case 4:
+ _e.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
- case
- partialknowledgePiecesPrepared =
+ case 5:
+ partialknowledgePiecesPrepared = _e.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTasks({
  parameters: parameters,
@@ -3771,8 +3819,8 @@
  maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
  isVerbose: isVerbose,
  })];
- case
- tasksPrepared = (
+ case 6:
+ tasksPrepared = (_e.sent()).tasksPrepared;
  // ----- /Tasks preparation -----
  // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
  // Note: Count total usage
@@ -3783,7 +3831,7 @@
  order: ORDER_OF_PIPELINE_JSON,
  value: __assign(__assign({}, pipeline), {
  // <- TODO: Probbably deeply clone the pipeline because `$exportJson` freezes the subobjects
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
+ title: title, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
  // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
  personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
  })];
@@ -5878,6 +5926,7 @@
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
  isAvilableInBrowser: true,
+ // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
@@ -6075,7 +6124,8 @@
  className: 'PdfScraper',
  mimeTypes: ['application/pdf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
- isAvilableInBrowser:
+ isAvilableInBrowser: false,
+ // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
  /**
@@ -6149,6 +6199,7 @@
  * TODO: [👣] Converted pdf documents can act as cached items - there is no need to run conversion each time
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
+ * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */

  /**