@promptbook/utils 0.68.0-1 → 0.68.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -406,7 +406,7 @@ flowchart LR
406
406
  end;
407
407
  ```
408
408
 
409
- - [More template samples](./samples/templates/)
409
+ - [More template samples](./samples/pipelines/)
410
410
  - [Read more about `.ptbk.md` file format here](https://github.com/webgptorg/promptbook/discussions/categories/concepts?discussions_q=is%3Aopen+label%3A.ptbk.md+category%3AConcepts)
411
411
 
412
412
  _Note: We are using [postprocessing functions](#postprocessing-functions) like `unwrapResult` that can be used to postprocess the result._
@@ -523,7 +523,7 @@ If you have a question [start a discussion](https://github.com/webgptorg/promptb
523
523
 
524
524
  ### Why not just use the OpenAI SDK / Anthropic Claude SDK / ...?
525
525
 
526
- Different levels of abstraction. OpenAI library is for direct use of OpenAI API. This library is for a higher level of abstraction. It is for creating prompt templates and promptbooks that are independent of the underlying library, LLM model, or even LLM provider.
526
+ Different levels of abstraction. OpenAI library is for direct use of OpenAI API. This library is for a higher level of abstraction. It defines pipelines that are independent of the underlying library, LLM model, or even LLM provider.
527
527
 
528
528
  ### How is it different from the Langchain library?
529
529
 
package/esm/index.es.js CHANGED
@@ -4,7 +4,7 @@ import spaceTrim$1, { spaceTrim } from 'spacetrim';
4
4
  /**
5
5
  * The version of the Promptbook library
6
6
  */
7
- var PROMPTBOOK_VERSION = '0.68.0-0';
7
+ var PROMPTBOOK_VERSION = '0.68.0-1';
8
8
  // TODO: !!!! List here all the versions and annotate + put into script
9
9
 
10
10
  /*! *****************************************************************************
@@ -726,7 +726,7 @@ function extractVariables(script) {
726
726
  */
727
727
 
728
728
  /**
729
- * Parses the prompt template and returns the set of all used parameters
729
+ * Parses the template and returns the set of all used parameters
730
730
  *
731
731
  * @param template the template with used parameters
732
732
  * @returns the set of parameter names
@@ -803,8 +803,8 @@ var PipelineLogicError = /** @class */ (function (_super) {
803
803
  }(Error));
804
804
 
805
805
  /**
806
- * Function renameParameter will find all usable parameters for given prompt template
807
- * In other words, it will find all parameters that are not used in the prompt template itseld and all its dependencies
806
+ * Function `renameParameter` will find all usable parameters for given template
807
+ * In other words, it will find all parameters that are not used in the template itself and all its dependencies
808
808
  *
809
809
  * @throws {PipelineLogicError} If the new parameter name is already used in the pipeline
810
810
  * @public exported from `@promptbook/utils`
@@ -1,7 +1,7 @@
1
1
  import type { ExpectationAmount } from '../../types/PipelineJson/Expectations';
2
2
  import type { ExpectationUnit } from '../../types/PipelineJson/Expectations';
3
3
  /**
4
- * Expect amount command describes the desired output of the prompt template (after post-processing)
4
+ * Expect amount command describes the desired output of the template (after post-processing)
5
5
  * It can set limits for the maximum/minimum length of the output, measured in characters, words, sentences, paragraphs,...
6
6
  *
7
7
  * Note: LLMs work with tokens, not characters, but in Promptbooks we want to use some human-recognisable and cross-model interoperable units.
@@ -1,6 +1,6 @@
1
1
  import { COMMANDS } from '../../index';
2
2
  /**
3
- * Command is one piece of the prompt template which adds some logic to the prompt template or the whole pipeline.
3
+ * Command is one piece of the template which adds some logic to the template or the whole pipeline.
4
4
  * It is parsed from the markdown from ul/ol items - one command per one item.
5
5
  */
6
6
  export type Command = ReturnType<typeof COMMANDS[number]['parse']>;
@@ -6,7 +6,7 @@ import type { string_href } from '../../types/typeAliases';
6
6
  */
7
7
  export type renderPipelineMermaidOptions = {
8
8
  /**
9
- * Callback for creating from prompt template graph node
9
+ * Callback for creating from template graph node
10
10
  */
11
11
  linkTemplate?(template: TemplateJson): {
12
12
  href: string_href;
@@ -1,7 +1,7 @@
1
1
  import type { TemplateJson } from '../../types/PipelineJson/TemplateJson';
2
2
  import type { string_parameter_name } from '../../types/typeAliases';
3
3
  /**
4
- * Parses the prompt template and returns the set of all used parameters
4
+ * Parses the template and returns the set of all used parameters
5
5
  *
6
6
  * @param template the template with used parameters
7
7
  * @returns the set of parameter names
@@ -16,8 +16,8 @@ type RenameParameterOptions = {
16
16
  readonly newParameterName: string_name;
17
17
  };
18
18
  /**
19
- * Function renameParameter will find all usable parameters for given prompt template
20
- * In other words, it will find all parameters that are not used in the prompt template itseld and all its dependencies
19
+ * Function `renameParameter` will find all usable parameters for given template
20
+ * In other words, it will find all parameters that are not used in the template itself and all its dependencies
21
21
  *
22
22
  * @throws {PipelineLogicError} If the new parameter name is already used in the pipeline
23
23
  * @public exported from `@promptbook/utils`
@@ -7,7 +7,7 @@ import type { string_json } from '../../types/typeAliases';
7
7
  * Note: Using here custom import to work in jest tests
8
8
  * Note: Using sync version is 💩 in the production code, but it's ok here in tests
9
9
  *
10
- * @param path - The path to the file relative to samples/templates directory
10
+ * @param path - The path to the file relative to samples/pipelines directory
11
11
  * @private internal function of tests
12
12
  */
13
13
  export declare function importPipelineWithoutPreparation(path: `${string}.ptbk.md`): PipelineString;
@@ -9,7 +9,7 @@ type PrepareTemplateInput = Pick<PipelineJson, 'templates' | 'parameters'> & {
9
9
  };
10
10
  type PreparedTemplates = {
11
11
  /**
12
- * @@@ Sequence of prompt templates that are chained together to form a pipeline
12
+ * @@@ Sequence of templates that are chained together to form a pipeline
13
13
  */
14
14
  readonly templatesPrepared: Array<TemplateJson>;
15
15
  };
@@ -13,7 +13,7 @@ import type { PreparationJson } from './PreparationJson';
13
13
  import type { TemplateJson } from './TemplateJson';
14
14
  /**
15
15
  * Promptbook is the **core concept of this package**.
16
- * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
16
+ * It represents a series of templates chained together to form a pipeline / one big template with input and result parameters.
17
17
  *
18
18
  * Note: [🚉] This is fully serializable as JSON
19
19
  *
@@ -27,7 +27,7 @@ export type PipelineJson = {
27
27
  * Note: It must use HTTPs URL
28
28
  * Tip: You can do versioning in the URL
29
29
  * For example: https://promptbook.studio/webgpt/write-website-content-cs.ptbk.md@1.0.0
30
- * Warning: Do not hash part of the URL, hash part is used for identification of the prompt template in the pipeline
30
+ * Warning: Do not hash part of the URL, hash part is used for identification of the template in the pipeline
31
31
  */
32
32
  readonly pipelineUrl?: string_pipeline_url;
33
33
  /**
@@ -57,7 +57,7 @@ export type PipelineJson = {
57
57
  */
58
58
  readonly defaultModelRequirements?: Partial<ModelRequirements>;
59
59
  /**
60
- * Sequence of prompt templates that are chained together to form a pipeline
60
+ * Sequence of templates that are chained together to form a pipeline
61
61
  */
62
62
  readonly templates: Array<TemplateJson>;
63
63
  /**
@@ -10,41 +10,41 @@ import type { string_prompt } from '../typeAliases';
10
10
  import type { string_template } from '../typeAliases';
11
11
  import type { Expectations } from './Expectations';
12
12
  /**
13
- * Common properties of all prompt templates
13
+ * Common properties of all templates
14
14
  */
15
15
  export interface TemplateJsonCommon {
16
16
  /**
17
17
  * Name of the template
18
18
  * - It must be unique across the pipeline
19
19
  * - It should start uppercase and can contain letters and numbers
20
- * - The pipelineUrl together with hash and name are used to identify the prompt template in the pipeline
20
+ * - The pipelineUrl together with hash and name are used to identify the template in the pipeline
21
21
  */
22
22
  readonly name: string_name;
23
23
  /**
24
- * Title of the prompt template
24
+ * Title of the template
25
25
  * It can use simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
26
26
  */
27
27
  readonly title: string;
28
28
  /**
29
- * Description of the prompt template
29
+ * Description of the template
30
30
  * It can use multiple paragraphs of simple markdown formatting like **bold**, *italic*, [link](https://example.com), ... BUT not code blocks and structure
31
31
  */
32
32
  readonly description?: string_markdown_text;
33
33
  /**
34
- * List of parameter names that are used in the prompt template and must be defined before the prompt template is executed
34
+ * List of parameter names that are used in the template and must be defined before the template is executed
35
35
  *
36
36
  * Note: Joker is one of the dependent parameters
37
37
  */
38
38
  readonly dependentParameterNames: Array<string_parameter_name>;
39
39
  /**
40
- * If theese parameters meet the expectations requirements, they are used instead of executing this prompt template
40
+ * If these parameters meet the expectations requirements, they are used instead of executing this template
41
41
  *
42
42
  * @see https://github.com/webgptorg/promptbook/discussions/66
43
43
  */
44
44
  readonly jokerParameterNames?: Array<string_parameter_name>;
45
45
  /**
46
46
  * Type of the execution
47
- * This determines if the prompt template is send to LLM, user or some scripting evaluation
47
+ * This determines if the template is sent to the LLM, user or some scripting evaluation
48
48
  */
49
49
  readonly blockType: BlockType;
50
50
  /**
@@ -62,7 +62,7 @@ export interface TemplateJsonCommon {
62
62
  */
63
63
  readonly preparedContent?: (string_prompt | string_javascript | string_markdown) & string_template;
64
64
  /**
65
- * List of postprocessing steps that are executed after the prompt template
65
+ * List of postprocessing steps that are executed after the template
66
66
  *
67
67
  * @see https://github.com/webgptorg/promptbook/discussions/31
68
68
  */
@@ -85,7 +85,7 @@ export interface TemplateJsonCommon {
85
85
  */
86
86
  readonly format?: FormatCommand['format'];
87
87
  /**
88
- * Name of the parameter that is the result of the prompt template
88
+ * Name of the parameter that is the result of the template
89
89
  */
90
90
  readonly resultingParameterName: string_name;
91
91
  }
@@ -1,6 +1,6 @@
1
1
  /**
2
2
  * Promptbook is the **core concept of this package**.
3
- * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
3
+ * It represents a series of templates chained together to form a pipeline / one big template with input and result parameters.
4
4
  *
5
5
  * @see @@@ https://github.com/webgptorg/promptbook#promptbook
6
6
  */
@@ -35,7 +35,7 @@ export type ExecutionReportJson = {
35
35
  */
36
36
  readonly description?: string_markdown_text;
37
37
  /**
38
- * Sequence of prompt templates in order which were executed
38
+ * Sequence of templates in order which were executed
39
39
  */
40
40
  readonly promptExecutions: Array<ExecutionPromptReportJson>;
41
41
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/utils",
3
- "version": "0.68.0-1",
3
+ "version": "0.68.0-2",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -12,7 +12,7 @@
12
12
  /**
13
13
  * The version of the Promptbook library
14
14
  */
15
- var PROMPTBOOK_VERSION = '0.68.0-0';
15
+ var PROMPTBOOK_VERSION = '0.68.0-1';
16
16
  // TODO: !!!! List here all the versions and annotate + put into script
17
17
 
18
18
  /*! *****************************************************************************
@@ -734,7 +734,7 @@
734
734
  */
735
735
 
736
736
  /**
737
- * Parses the prompt template and returns the set of all used parameters
737
+ * Parses the template and returns the set of all used parameters
738
738
  *
739
739
  * @param template the template with used parameters
740
740
  * @returns the set of parameter names
@@ -811,8 +811,8 @@
811
811
  }(Error));
812
812
 
813
813
  /**
814
- * Function renameParameter will find all usable parameters for given prompt template
815
- * In other words, it will find all parameters that are not used in the prompt template itseld and all its dependencies
814
+ * Function `renameParameter` will find all usable parameters for given template
815
+ * In other words, it will find all parameters that are not used in the template itself and all its dependencies
816
816
  *
817
817
  * @throws {PipelineLogicError} If the new parameter name is already used in the pipeline
818
818
  * @public exported from `@promptbook/utils`