@promptbook/remote-server 0.68.0-1 → 0.68.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -284,7 +284,7 @@ flowchart LR
284
284
  end;
285
285
  ```
286
286
 
287
- - [More template samples](./samples/templates/)
287
+ - [More template samples](./samples/pipelines/)
288
288
  - [Read more about `.ptbk.md` file format here](https://github.com/webgptorg/promptbook/discussions/categories/concepts?discussions_q=is%3Aopen+label%3A.ptbk.md+category%3AConcepts)
289
289
 
290
290
  _Note: We are using [postprocessing functions](#postprocessing-functions) like `unwrapResult` that can be used to postprocess the result._
@@ -401,7 +401,7 @@ If you have a question [start a discussion](https://github.com/webgptorg/promptb
401
401
 
402
402
  ### Why not just use the OpenAI SDK / Anthropic Claude SDK / ...?
403
403
 
404
- Different levels of abstraction. OpenAI library is for direct use of OpenAI API. This library is for a higher level of abstraction. It is for creating prompt templates and promptbooks that are independent of the underlying library, LLM model, or even LLM provider.
404
+ Different levels of abstraction. OpenAI library is for direct use of OpenAI API. This library is for a higher level of abstraction. It defines pipelines that are independent of the underlying library, LLM model, or even LLM provider.
405
405
 
406
406
  ### How is it different from the Langchain library?
407
407
 
package/esm/index.es.js CHANGED
@@ -7,7 +7,7 @@ import spaceTrim$1, { spaceTrim } from 'spacetrim';
7
7
  /**
8
8
  * The version of the Promptbook library
9
9
  */
10
- var PROMPTBOOK_VERSION = '0.68.0-0';
10
+ var PROMPTBOOK_VERSION = '0.68.0-2';
11
11
  // TODO: !!!! List here all the versions and annotate + put into script
12
12
 
13
13
  /*! *****************************************************************************
@@ -1,7 +1,7 @@
1
1
  import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
2
2
  import type { ExpectationUnit } from '../../types/PipelineJson/Expectations';
3
3
  /**
4
- * Expect amount command describes the desired output of the prompt template (after post-processing)
4
+ * Expect amount command describes the desired output of the template (after post-processing)
5
5
  * It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs,...
6
6
  *
7
7
  * Note: LLMs work with tokens, not characters, but in Promptbooks we want to use some human-recognisable and cross-model interoperable units.
@@ -1,6 +1,6 @@
1
1
  import { COMMANDS } from '../../index';
2
2
  /**
3
- * Command is one piece of the prompt template which adds some logic to the prompt template or the whole pipeline.
3
+ * Command is one piece of the template which adds some logic to the template or the whole pipeline.
4
4
  * It is parsed from the markdown from ul/ol items - one command per one item.
5
5
  */
6
6
  export type Command = ReturnType<typeof COMMANDS[number]['parse']>;
@@ -6,7 +6,7 @@ import type { string_href } from '../../types/typeAliases';
6
6
  */
7
7
  export type renderPipelineMermaidOptions = {
8
8
  /**
9
- * Callback for creating from prompt template graph node
9
+ * Callback for creating a graph node from a template
10
10
  */
11
11
  linkTemplate?(template: TemplateJson): {
12
12
  href: string_href;
@@ -1,7 +1,7 @@
1
1
  import type { TemplateJson } from '../../types/PipelineJson/TemplateJson';
2
2
  import type { string_parameter_name } from '../../types/typeAliases';
3
3
  /**
4
- * Parses the prompt template and returns the set of all used parameters
4
+ * Parses the template and returns the set of all used parameters
5
5
  *
6
6
  * @param template the template with used parameters
7
7
  * @returns the set of parameter names
@@ -16,8 +16,8 @@ type RenameParameterOptions = {
16
16
  readonly newParameterName: string_name;
17
17
  };
18
18
  /**
19
- * Function renameParameter will find all usable parameters for given prompt template
20
- * In other words, it will find all parameters that are not used in the prompt template itseld and all its dependencies
19
+ * Function `renameParameter` will find all usable parameters for given template
20
+ * In other words, it will find all parameters that are not used in the template itself and all its dependencies
21
21
  *
22
22
  * @throws {PipelineLogicError} If the new parameter name is already used in the pipeline
23
23
  * @public exported from `@promptbook/utils`
@@ -7,7 +7,7 @@ import type { string_json } from '../../types/typeAliases';
7
7
  * Note: Using here custom import to work in jest tests
8
8
  * Note: Using sync version is 💩 in the production code, but it's ok here in tests
9
9
  *
10
- * @param path - The path to the file relative to samples/templates directory
10
+ * @param path - The path to the file relative to samples/pipelines directory
11
11
  * @private internal function of tests
12
12
  */
13
13
  export declare function importPipelineWithoutPreparation(path: `${string}.ptbk.md`): PipelineString;
@@ -9,7 +9,7 @@ type PrepareTemplateInput = Pick<PipelineJson, 'templates' | 'parameters'> & {
9
9
  };
10
10
  type PreparedTemplates = {
11
11
  /**
12
- * @@@ Sequence of prompt templates that are chained together to form a pipeline
12
+ * @@@ Sequence of templates that are chained together to form a pipeline
13
13
  */
14
14
  readonly templatesPrepared: Array<TemplateJson>;
15
15
  };
@@ -13,7 +13,7 @@ import type { PreparationJson } from './PreparationJson';
13
13
  import type { TemplateJson } from './TemplateJson';
14
14
  /**
15
15
  * Promptbook is the **core concept of this package**.
16
- * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
16
+ * It represents a series of templates chained together to form a pipeline / one big template with input and result parameters.
17
17
  *
18
18
  * Note: [🚉] This is fully serializable as JSON
19
19
  *
@@ -27,7 +27,7 @@ export type PipelineJson = {
27
27
  * Note: It must use HTTPs URL
28
28
  * Tip: You can do versioning in the URL
29
29
  * For example: https://promptbook.studio/webgpt/write-website-content-cs.ptbk.md@1.0.0
30
- * Warning: Do not hash part of the URL, hash part is used for identification of the prompt template in the pipeline
30
+ * Warning: Do not hash part of the URL, hash part is used for identification of the template in the pipeline
31
31
  */
32
32
  readonly pipelineUrl?: string_pipeline_url;
33
33
  /**
@@ -57,7 +57,7 @@ export type PipelineJson = {
57
57
  */
58
58
  readonly defaultModelRequirements?: Partial<ModelRequirements>;
59
59
  /**
60
- * Sequence of prompt templates that are chained together to form a pipeline
60
+ * Sequence of templates that are chained together to form a pipeline
61
61
  */
62
62
  readonly templates: Array<TemplateJson>;
63
63
  /**
@@ -10,41 +10,41 @@ import type { string_prompt } from '../typeAliases';
10
10
  import type { string_template } from '../typeAliases';
11
11
  import type { Expectations } from './Expectations';
12
12
  /**
13
- * Common properties of all prompt templates
13
+ * Common properties of all templates
14
14
  */
15
15
  export interface TemplateJsonCommon {
16
16
  /**
17
17
  * Name of the template
18
18
  * - It must be unique across the pipeline
19
19
  * - It should start uppercase and can contain letters and numbers
20
- * - The pipelineUrl together with hash and name are used to identify the prompt template in the pipeline
20
+ * - The pipelineUrl together with hash and name are used to identify the template in the pipeline
21
21
  */
22
22
  readonly name: string_name;
23
23
  /**
24
- * Title of the prompt template
24
+ * Title of the template
25
25
  * It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
26
26
  */
27
27
  readonly title: string;
28
28
  /**
29
- * Description of the prompt template
29
+ * Description of the template
30
30
  * It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
31
31
  */
32
32
  readonly description?: string_markdown_text;
33
33
  /**
34
- * List of parameter names that are used in the prompt template and must be defined before the prompt template is executed
34
+ * List of parameter names that are used in the template and must be defined before the template is executed
35
35
  *
36
36
  * Note: Joker is one of the dependent parameters
37
37
  */
38
38
  readonly dependentParameterNames: Array<string_parameter_name>;
39
39
  /**
40
- * If theese parameters meet the expectations requirements, they are used instead of executing this prompt template
40
+ * If these parameters meet the expectations requirements, they are used instead of executing this template
41
41
  *
42
42
  * @see https://github.com/webgptorg/promptbook/discussions/66
43
43
  */
44
44
  readonly jokerParameterNames?: Array<string_parameter_name>;
45
45
  /**
46
46
  * Type of the execution
47
- * This determines if the prompt template is send to LLM, user or some scripting evaluation
47
+ * This determines if the template is sent to LLM, user or some scripting evaluation
48
48
  */
49
49
  readonly blockType: BlockType;
50
50
  /**
@@ -62,7 +62,7 @@ export interface TemplateJsonCommon {
62
62
  */
63
63
  readonly preparedContent?: (string_prompt | string_javascript | string_markdown) & string_template;
64
64
  /**
65
- * List of postprocessing steps that are executed after the prompt template
65
+ * List of postprocessing steps that are executed after the template
66
66
  *
67
67
  * @see https://github.com/webgptorg/promptbook/discussions/31
68
68
  */
@@ -85,7 +85,7 @@ export interface TemplateJsonCommon {
85
85
  */
86
86
  readonly format?: FormatCommand['format'];
87
87
  /**
88
- * Name of the parameter that is the result of the prompt template
88
+ * Name of the parameter that is the result of the template
89
89
  */
90
90
  readonly resultingParameterName: string_name;
91
91
  }
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * Promptbook is the **core concept of this package**.
3
- * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
3
+ * It represents a series of templates chained together to form a pipeline / one big template with input and result parameters.
4
4
  *
5
5
  * @see @@@ https://github.com/webgptorg/promptbook#promptbook
6
6
  */
@@ -35,7 +35,7 @@ export type ExecutionReportJson = {
35
35
  */
36
36
  readonly description?: string_markdown_text;
37
37
  /**
38
- * Sequence of prompt templates in order which were executed
38
+ * Sequence of templates in order which were executed
39
39
  */
40
40
  readonly promptExecutions: Array<ExecutionPromptReportJson>;
41
41
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.68.0-1",
3
+ "version": "0.68.0-3",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.68.0-1"
50
+ "@promptbook/core": "0.68.0-3"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -14,7 +14,7 @@
14
14
  /**
15
15
  * The version of the Promptbook library
16
16
  */
17
- var PROMPTBOOK_VERSION = '0.68.0-0';
17
+ var PROMPTBOOK_VERSION = '0.68.0-2';
18
18
  // TODO: !!!! List here all the versions and annotate + put into script
19
19
 
20
20
  /*! *****************************************************************************