@promptbook/remote-server 0.85.0-9 → 0.86.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,7 +5,7 @@
  */
  export declare function promptbookCli(): Promise<void>;
  /**
- * TODO: [🧠] Maybe `run` command the default, instead of `ptbk run ./foo.book.md` -> `ptbk ./foo.book.md`
+ * TODO: [🧠] Maybe `run` command the default, instead of `ptbk run ./foo.book` -> `ptbk ./foo.book`
  * TODO: [🥠] Do not export, its just for CLI script
  * TODO: [🕌] When more functionalities, rename
  * Note: 11:11
@@ -1,6 +1,6 @@
  export {};
  /**
- * Note: [🐠] For example here URL https://example.com/pipeline.book.md is not valid
+ * Note: [🐠] For example here URL https://example.com/pipeline.book is not valid
  * because it is on private network BUT its very hard to debug because
  * there is no error message and false return (the error) happen deep in:
  * `isValidPipelineUrl` -> `isValidPipelineUrl` -> `isUrlOnPrivateNetwork`
@@ -6,7 +6,7 @@ import type { PipelineCollection } from '../PipelineCollection';
  /**
  * Options for `createCollectionFromDirectory` function
  *
- * Note: `rootDirname` is not needed because it is the folder in which `.book.md` file is located
+ * Note: `rootDirname` is not needed because it is the folder in which `.book` or `.book` file is located
  * This is not same as `path` which is the first argument of `createCollectionFromDirectory` - it can be a subfolder
  */
  type CreatePipelineCollectionFromDirectoryOptions = Omit<PrepareAndScrapeOptions, 'rootDirname'> & {
@@ -51,13 +51,13 @@ type CreatePipelineCollectionFromDirectoryOptions = Omit<PrepareAndScrapeOptions
  *
  * Note: Works only in Node.js environment because it reads the file system
  *
- * @param path - path to the directory with pipelines
+ * @param rootPath - path to the directory with pipelines
  * @param tools - Execution tools to be used for pipeline preparation if needed - If not provided, `$provideExecutionToolsForNode` will be used
  * @param options - Options for the collection creation
  * @returns PipelineCollection
  * @public exported from `@promptbook/node`
  */
- export declare function createCollectionFromDirectory(path: string_dirname, tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>, options?: CreatePipelineCollectionFromDirectoryOptions): Promise<PipelineCollection>;
+ export declare function createCollectionFromDirectory(rootPath: string_dirname, tools?: Pick<ExecutionTools, 'llm' | 'fs' | 'scrapers'>, options?: CreatePipelineCollectionFromDirectoryOptions): Promise<PipelineCollection>;
  export {};
  /**
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
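
For orientation, a minimal usage sketch of `createCollectionFromDirectory` with the renamed `rootPath` parameter; the `./books` directory and the omission of explicit tools are illustrative assumptions, not taken from this diff:

```ts
// Minimal sketch, assuming the `@promptbook/node` export shown above; './books' is illustrative.
import { createCollectionFromDirectory } from '@promptbook/node';

const collection = await createCollectionFromDirectory(
    './books', // <- `rootPath` (formerly `path`): directory that is scanned for pipelines
    undefined, // <- tools: per the docblock, `$provideExecutionToolsForNode` is used when omitted
);
const pipelineUrls = await collection.listPipelines();
console.info(pipelineUrls);
```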
@@ -3,7 +3,7 @@ import type { ForeachCommand } from './ForeachCommand';
  /**
  * Parses the foreach command
  *
- * Note: @@@ This command is used as foreach for new commands - it should NOT be used in any `.book.md` file
+ * Note: @@@ This command is used as foreach for new commands - it should NOT be used in any `.book` file
  *
  * @see `documentationUrl` for more details
  * @public exported from `@promptbook/editable`
@@ -3,7 +3,7 @@ import type { FormfactorCommand } from './FormfactorCommand';
  /**
  * Parses the formfactor command
  *
- * Note: @@@ This command is used as formfactor for new commands - it should NOT be used in any `.book.md` file
+ * Note: @@@ This command is used as formfactor for new commands - it should NOT be used in any `.book` file
  *
  * @see `documentationUrl` for more details
  * @public exported from `@promptbook/editable`
@@ -3,7 +3,7 @@ import type { BoilerplateCommand } from './BoilerplateCommand';
  /**
  * Parses the boilerplate command
  *
- * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book.md` file
+ * Note: @@@ This command is used as boilerplate for new commands - it should NOT be used in any `.book` file
  *
  * @see `documentationUrl` for more details
  * @private within the commands folder
@@ -2,7 +2,7 @@ import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
  import type { PipelineString } from '../../pipeline/PipelineString';
  import type { string_json } from '../../types/typeAliases';
  /**
- * Import the pipeline.book.md or pipeline.book.json file
+ * Import the pipeline.book or pipeline.book.json file
  *
  * Note: Using here custom import to work in jest tests
  * Note: Using sync version is 💩 in the production code, but it's ok here in tests
@@ -10,7 +10,7 @@ import type { string_json } from '../../types/typeAliases';
  * @param path - The path to the file relative to examples/pipelines directory
  * @private internal function of tests
  */
- export declare function importPipelineWithoutPreparation(path: `${string}.book.md`): PipelineString;
+ export declare function importPipelineWithoutPreparation(path: `${string}.book`): PipelineString;
  export declare function importPipelineWithoutPreparation(path: `${string}.book.json`): PipelineJson;
  /**
  * Import the pipeline.book.json file as parsed JSON
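
The two overloads above select the return type by path suffix; a small type-level sketch of how they now resolve (the file names are hypothetical, and the relative import paths are reused from the hunk headers above):

```ts
// Sketch only: `importPipelineWithoutPreparation` is a test-internal helper,
// so its declarations are repeated here instead of imported; file names are hypothetical.
import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
import type { PipelineString } from '../../pipeline/PipelineString';

declare function importPipelineWithoutPreparation(path: `${string}.book`): PipelineString;
declare function importPipelineWithoutPreparation(path: `${string}.book.json`): PipelineJson;

const asSource: PipelineString = importPipelineWithoutPreparation('simple.book'); // <- matches the `.book` overload
const asJson: PipelineJson = importPipelineWithoutPreparation('simple.book.json'); // <- matches the `.book.json` overload
```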
@@ -29,12 +29,12 @@ export type PipelineJson = {
  * Note: It must be unique across all pipeline collections
  * Note: It must use HTTPs URL
  * Tip: You can do versioning in the URL
- * For example: https://promptbook.studio/webgpt/write-website-content-cs.book.md@1.0.0
+ * For example: https://promptbook.studio/webgpt/write-website-content-cs.book@1.0.0
  * Warning: Do not hash part of the URL, hash part is used for identification of the task in the pipeline
  */
  readonly pipelineUrl?: string_pipeline_url;
  /**
- * Internal helper for tracking the source `.book.md` file of the pipeline
+ * Internal helper for tracking the source `.book` file of the pipeline
  */
  readonly sourceFile?: string_filename;
  /**
@@ -95,7 +95,7 @@ export type CommonPrompt = {
  /**
  * Unique identifier of the pipeline with specific task name as hash
  *
- * @example https://promptbook.studio/webgpt/write-website-content-cs.book.md#keywords
+ * @example https://promptbook.studio/webgpt/write-website-content-cs.book#keywords
  */
  readonly pipelineUrl?: string_pipeline_url_with_task_hash;
  /**
@@ -328,13 +328,13 @@ export type string_pipeline_root_url = string;
  /**
  * Semantic helper
  *
- * For example `"https://promptbook.studio/webgpt/write-website-content-cs.book.md"`
+ * For example `"https://promptbook.studio/webgpt/write-website-content-cs.book"`
  */
  export type string_pipeline_url = string;
  /**
  * Semantic helper
  *
- * For example `"https://promptbook.studio/webgpt/write-website-content-cs.book.md#keywords"`
+ * For example `"https://promptbook.studio/webgpt/write-website-content-cs.book#keywords"`
  */
  export type string_pipeline_url_with_task_hash = string;
  /**
@@ -9,7 +9,7 @@ import type { string_json } from '../../../types/typeAliases';
  */
  export declare function stringifyPipelineJson<TType>(pipeline: TType): string_json<TType>;
  /**
- * TODO: [🐝] Not Working propperly @see https://promptbook.studio/examples/mixed-knowledge.book.md
+ * TODO: [🐝] Not Working propperly @see https://promptbook.studio/examples/mixed-knowledge.book
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
  * TODO: [🍙] Make some standard order of json properties
@@ -19,9 +19,9 @@ declare class Wizzard {
  * Run the book
  *
  * It can be loaded from:
- * 1) As a file ./books/write-cv.book.md
- * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
- * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 1) As a file ./books/write-cv.book
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book fetched from the internet
  * 3) As a string
  *
  * Note: This works simmilar to the `ptbk run` command
@@ -43,9 +43,9 @@ declare class Wizzard {
  * Load book from the source
  *
  * Pipelines can be loaded from:
- * 1) As a file ./books/write-cv.book.md
- * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md found in ./books folder recursively
- * 2) As a URL https://promptbook.studio/hejny/write-cv.book.md fetched from the internet
+ * 1) As a file ./books/write-cv.book
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book found in ./books folder recursively
+ * 2) As a URL https://promptbook.studio/hejny/write-cv.book fetched from the internet
  * 3) As a string
  *
  * @param pipelineSource
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/remote-server",
- "version": "0.85.0-9",
+ "version": "0.86.0-2",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "private": false,
  "sideEffects": false,
@@ -42,12 +42,12 @@
  "bugs": {
  "url": "https://github.com/webgptorg/promptbook/issues"
  },
- "homepage": "https://www.npmjs.com/package/@promptbook/core",
+ "homepage": "https://ptbk.io/",
  "main": "./umd/index.umd.js",
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.85.0-9"
+ "@promptbook/core": "0.86.0-2"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -28,7 +28,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.85.0-8';
+ var PROMPTBOOK_ENGINE_VERSION = '0.86.0-1';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1488,57 +1488,6 @@
  return true;
  }

- /**
- * Checks if an URL is reserved for private networks or localhost.
- *
- * Note: There are two simmilar functions:
- * - `isUrlOnPrivateNetwork` which tests full URL
- * - `isHostnameOnPrivateNetwork` *(this one)* which tests just hostname
- *
- * @public exported from `@promptbook/utils`
- */
- function isHostnameOnPrivateNetwork(hostname) {
- if (hostname === 'example.com' ||
- hostname === 'localhost' ||
- hostname.endsWith('.localhost') ||
- hostname.endsWith('.local') ||
- hostname.endsWith('.test') ||
- hostname === '127.0.0.1' ||
- hostname === '::1') {
- return true;
- }
- if (hostname.includes(':')) {
- // IPv6
- var ipParts = hostname.split(':');
- return ipParts[0] === 'fc00' || ipParts[0] === 'fd00' || ipParts[0] === 'fe80';
- }
- else {
- // IPv4
- var ipParts = hostname.split('.').map(function (part) { return Number.parseInt(part, 10); });
- return (ipParts[0] === 10 ||
- (ipParts[0] === 172 && ipParts[1] >= 16 && ipParts[1] <= 31) ||
- (ipParts[0] === 192 && ipParts[1] === 168));
- }
- }
-
- /**
- * Checks if an IP address or hostname is reserved for private networks or localhost.
- *
- * Note: There are two simmilar functions:
- * - `isUrlOnPrivateNetwork` *(this one)* which tests full URL
- * - `isHostnameOnPrivateNetwork` which tests just hostname
- *
- * @param {string} ipAddress - The IP address to check.
- * @returns {boolean} Returns true if the IP address is reserved for private networks or localhost, otherwise false.
- * @public exported from `@promptbook/utils`
- */
- function isUrlOnPrivateNetwork(url) {
- if (typeof url === 'string') {
- url = new URL(url);
- }
- return isHostnameOnPrivateNetwork(url.hostname);
- }
-
  /**
  * Tests if given string is valid URL.
  *
@@ -1581,16 +1530,19 @@
  if (!isValidUrl(url)) {
  return false;
  }
- if (!url.startsWith('https://')) {
+ if (!url.startsWith('https://') && !url.startsWith('http://') /* <- Note: [👣] */) {
  return false;
  }
  if (url.includes('#')) {
  // TODO: [🐠]
  return false;
  }
+ /*
+ Note: [👣][🧠] Is it secure to allow pipeline URLs on private and unsecured networks?
  if (isUrlOnPrivateNetwork(url)) {
  return false;
  }
+ */
  return true;
  }
  /**
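
Taken together with the removal of `isHostnameOnPrivateNetwork` / `isUrlOnPrivateNetwork` above, the net effect is that pipeline URL validation is looser in 0.86.0-2. A condensed, standalone sketch of the new behaviour, reconstructed from this hunk (the helpers below are simplified and are not the actual `@promptbook/utils` exports):

```ts
// Simplified reconstruction of the relaxed check shown in the hunk above; illustrative only.
function isValidUrl(value: string): boolean {
    try {
        new URL(value);
        return true;
    } catch {
        return false;
    }
}

function isValidPipelineUrlSketch(url: string): boolean {
    if (!isValidUrl(url)) {
        return false;
    }
    // [👣] Plain `http://` URLs are now accepted alongside `https://`
    if (!url.startsWith('https://') && !url.startsWith('http://')) {
        return false;
    }
    // URLs carrying a task hash are still rejected
    if (url.includes('#')) {
        return false;
    }
    // The private-network rejection is commented out in this version,
    // so e.g. `http://localhost:3000/my-pipeline.book` (a hypothetical URL) now passes.
    return true;
}
```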
@@ -2062,7 +2014,7 @@
  * TODO: [🐚] Split into more files and make `PrepareTask` & `RemoteTask` + split the function
  */

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book.md",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book.md`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

  /**
  * Checks if value is valid email
@@ -3217,7 +3169,7 @@
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _b = createPipelineExecutor;
  _c = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book')];
  case 1:
  preparePersonaExecutor = _b.apply(void 0, [(_c.pipeline = _d.sent(),
  _c.tools = tools,
@@ -4435,7 +4387,7 @@
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _c = createPipelineExecutor;
  _d = {};
- return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book.md')];
+ return [4 /*yield*/, collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book')];
  case 1:
  prepareTitleExecutor = _c.apply(void 0, [(_d.pipeline = _e.sent(),
  _d.tools = tools,
@@ -7014,6 +6966,46 @@
  }
  });
  }); });
+ // TODO: [🧠] Is it secure / good idea to expose source codes of hosted books
+ app.get("".concat(rootPath, "/books/*"), function (request, response) { return __awaiter(_this, void 0, void 0, function () {
+ var pipelines, fullUrl, pipelineUrl, pipeline, source, error_1;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ _a.trys.push([0, 3, , 4]);
+ if (collection === null) {
+ response.status(500).send('No collection nor books available');
+ return [2 /*return*/];
+ }
+ return [4 /*yield*/, collection.listPipelines()];
+ case 1:
+ pipelines = _a.sent();
+ fullUrl = request.protocol + '://' + request.get('host') + request.originalUrl;
+ pipelineUrl = pipelines.find(function (pipelineUrl) { return pipelineUrl.endsWith(request.originalUrl); }) || fullUrl;
+ return [4 /*yield*/, collection.getPipelineByUrl(pipelineUrl)];
+ case 2:
+ pipeline = _a.sent();
+ source = pipeline.sources[0];
+ if (source === undefined || source.type !== 'BOOK') {
+ throw new Error('Pipeline source is not a book');
+ }
+ response
+ .type('text/markdown')
+ .send(source.content);
+ return [3 /*break*/, 4];
+ case 3:
+ error_1 = _a.sent();
+ if (!(error_1 instanceof Error)) {
+ throw error_1;
+ }
+ response
+ .status(404)
+ .send({ error: serializeError(error_1) });
+ return [3 /*break*/, 4];
+ case 4: return [2 /*return*/];
+ }
+ });
+ }); });
  app.get("".concat(rootPath, "/executions"), function (request, response) { return __awaiter(_this, void 0, void 0, function () {
  return __generator(this, function (_a) {
  response.send(runningExecutionTasks);
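
The newly added route above serves a hosted book's source verbatim as `text/markdown`. A hedged client-side sketch of consuming it (the host, port, and book path are illustrative assumptions; `rootPath` is whatever prefix the server was configured with):

```ts
// Illustrative request against the new GET `${rootPath}/books/*` endpoint;
// the base URL and book name below are assumptions, not taken from this diff.
const response = await fetch('http://localhost:4460/books/prepare-title.book');

if (response.ok) {
    const bookSource = await response.text(); // <- served with Content-Type: text/markdown
    console.info(bookSource);
} else {
    // On failure the route answers 404 with a `{ error: ... }` body (serialized error)
    console.error(await response.json());
}
```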
@@ -7026,7 +7018,9 @@
  taskId = request.params.taskId;
  execution = runningExecutionTasks.find(function (executionTask) { return executionTask.taskId === taskId; });
  if (execution === undefined) {
- response.status(404).send("Execution \"".concat(taskId, "\" not found"));
+ response
+ .status(404)
+ .send("Execution \"".concat(taskId, "\" not found"));
  return [2 /*return*/];
  }
  response.send(execution.currentValue);
@@ -7034,7 +7028,7 @@
  });
  }); });
  app.post("".concat(rootPath, "/executions/new"), function (request, response) { return __awaiter(_this, void 0, void 0, function () {
- var _a, inputParameters, identification, pipelineUrl, pipeline, tools, pipelineExecutor, executionTask, error_1;
+ var _a, inputParameters, identification, pipelineUrl, pipeline, tools, pipelineExecutor, executionTask, error_2;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0:
@@ -7062,11 +7056,11 @@
  response.send(executionTask);
  return [3 /*break*/, 5];
  case 4:
- error_1 = _b.sent();
- if (!(error_1 instanceof Error)) {
- throw error_1;
+ error_2 = _b.sent();
+ if (!(error_2 instanceof Error)) {
+ throw error_2;
  }
- response.status(400).send({ error: serializeError(error_1) });
+ response.status(400).send({ error: serializeError(error_2) });
  return [3 /*break*/, 5];
  case 5: return [2 /*return*/];
  }
@@ -7087,7 +7081,7 @@
  }
  // -----------
  socket.on('prompt-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
- var identification, prompt, tools, llm, _a, promptResult, _b, error_2;
+ var identification, prompt, tools, llm, _a, promptResult, _b, error_3;
  return __generator(this, function (_c) {
  switch (_c.label) {
  case 0:
@@ -7156,11 +7150,11 @@
  socket.emit('prompt-response', { promptResult: promptResult } /* <- Note: [🤛] */);
  return [3 /*break*/, 15];
  case 13:
- error_2 = _c.sent();
- if (!(error_2 instanceof Error)) {
- throw error_2;
+ error_3 = _c.sent();
+ if (!(error_3 instanceof Error)) {
+ throw error_3;
  }
- socket.emit('error', serializeError(error_2) /* <- Note: [🤛] */);
+ socket.emit('error', serializeError(error_3) /* <- Note: [🤛] */);
  return [3 /*break*/, 15];
  case 14:
  socket.disconnect();
@@ -7172,7 +7166,7 @@
  // -----------
  // TODO: [👒] Listing models (and checking configuration) probbably should go through REST API not Socket.io
  socket.on('listModels-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
- var identification, tools, llm, models, error_3;
+ var identification, tools, llm, models, error_4;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7193,11 +7187,11 @@
  socket.emit('listModels-response', { models: models } /* <- Note: [🤛] */);
  return [3 /*break*/, 6];
  case 4:
- error_3 = _a.sent();
- if (!(error_3 instanceof Error)) {
- throw error_3;
+ error_4 = _a.sent();
+ if (!(error_4 instanceof Error)) {
+ throw error_4;
  }
- socket.emit('error', serializeError(error_3));
+ socket.emit('error', serializeError(error_4));
  return [3 /*break*/, 6];
  case 5:
  socket.disconnect();
@@ -7209,7 +7203,7 @@
  // -----------
  // TODO: [👒] Listing models (and checking configuration) probbably should go through REST API not Socket.io
  socket.on('preparePipeline-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
- var identification, pipeline, tools, preparedPipeline, error_4;
+ var identification, pipeline, tools, preparedPipeline, error_5;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7229,11 +7223,11 @@
  socket.emit('preparePipeline-response', { preparedPipeline: preparedPipeline } /* <- Note: [🤛] */);
  return [3 /*break*/, 6];
  case 4:
- error_4 = _a.sent();
- if (!(error_4 instanceof Error)) {
- throw error_4;
+ error_5 = _a.sent();
+ if (!(error_5 instanceof Error)) {
+ throw error_5;
  }
- socket.emit('error', serializeError(error_4));
+ socket.emit('error', serializeError(error_5));
  return [3 /*break*/, 6];
  case 5:
  socket.disconnect();