@promptbook/openai 0.61.0-14 → 0.61.0-16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/README.md +2 -2
  2. package/esm/index.es.js +148 -10
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/promptbook-collection/index.d.ts +12 -15
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  6. package/esm/typings/src/_packages/types.index.d.ts +2 -3
  7. package/esm/typings/src/_packages/utils.index.d.ts +4 -3
  8. package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -3
  9. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +28 -3
  10. package/esm/typings/src/config.d.ts +26 -1
  11. package/esm/typings/src/config.test.d.ts +4 -0
  12. package/esm/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +2 -2
  13. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  14. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +3 -0
  15. package/esm/typings/src/execution/PipelineExecutor.d.ts +5 -5
  16. package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -3
  17. package/esm/typings/src/execution/ScriptExecutionTools.d.ts +2 -3
  18. package/esm/typings/src/execution/createPipelineExecutor.d.ts +16 -3
  19. package/esm/typings/src/formats/_common/FormatDefinition.d.ts +1 -1
  20. package/esm/typings/src/knowledge/prepare-knowledge/_common/Scraper.d.ts +27 -0
  21. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
  22. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  23. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  27. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
  28. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  29. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +9 -0
  30. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
  31. package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -1
  32. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  33. package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +3 -0
  34. package/esm/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +4 -0
  35. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +5 -3
  36. package/esm/typings/src/types/Prompt.d.ts +7 -10
  37. package/esm/typings/src/types/typeAliases.d.ts +44 -4
  38. package/esm/typings/src/utils/deepFreeze.d.ts +10 -1
  39. package/esm/typings/src/utils/extractParameters.d.ts +2 -2
  40. package/esm/typings/src/{execution/utils → utils}/replaceParameters.d.ts +2 -4
  41. package/esm/typings/src/utils/sets/difference.d.ts +3 -0
  42. package/package.json +2 -2
  43. package/umd/index.umd.js +148 -10
  44. package/umd/index.umd.js.map +1 -1
  45. package/umd/typings/promptbook-collection/index.d.ts +12 -15
  46. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  47. package/umd/typings/src/_packages/types.index.d.ts +2 -3
  48. package/umd/typings/src/_packages/utils.index.d.ts +4 -3
  49. package/umd/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -3
  50. package/umd/typings/src/commands/_common/types/CommandParser.d.ts +28 -3
  51. package/umd/typings/src/config.d.ts +26 -1
  52. package/umd/typings/src/config.test.d.ts +4 -0
  53. package/umd/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +2 -2
  54. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
  55. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +3 -0
  56. package/umd/typings/src/execution/PipelineExecutor.d.ts +5 -5
  57. package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -3
  58. package/umd/typings/src/execution/ScriptExecutionTools.d.ts +2 -3
  59. package/umd/typings/src/execution/createPipelineExecutor.d.ts +16 -3
  60. package/umd/typings/src/formats/_common/FormatDefinition.d.ts +1 -1
  61. package/umd/typings/src/knowledge/prepare-knowledge/_common/Scraper.d.ts +27 -0
  62. package/umd/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
  63. package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  64. package/umd/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
  65. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  66. package/umd/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  67. package/umd/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  68. package/umd/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
  69. package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  70. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +9 -0
  71. package/umd/typings/src/prepare/isPipelinePrepared.test.d.ts +1 -0
  72. package/umd/typings/src/prepare/preparePipeline.d.ts +1 -0
  73. package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -1
  74. package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  75. package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +3 -0
  76. package/umd/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +4 -0
  77. package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +5 -3
  78. package/umd/typings/src/types/Prompt.d.ts +7 -10
  79. package/umd/typings/src/types/typeAliases.d.ts +44 -4
  80. package/umd/typings/src/utils/deepFreeze.d.ts +10 -1
  81. package/umd/typings/src/utils/extractParameters.d.ts +2 -2
  82. package/umd/typings/src/{execution/utils → utils}/replaceParameters.d.ts +2 -4
  83. package/umd/typings/src/utils/replaceParameters.test.d.ts +1 -0
  84. package/umd/typings/src/utils/sets/difference.d.ts +3 -0
  85. package/esm/typings/src/types/Parameters.d.ts +0 -14
  86. package/umd/typings/src/types/Parameters.d.ts +0 -14
  87. /package/esm/typings/src/{execution/utils/replaceParameters.test.d.ts → prepare/isPipelinePrepared.test.d.ts} +0 -0
  88. /package/{umd/typings/src/execution → esm/typings/src}/utils/replaceParameters.test.d.ts +0 -0
@@ -1,7 +1,9 @@
  import type { WritableDeep } from 'type-fest';
+ import type { PromptTemplateJson } from '../../../types/PipelineJson/PromptTemplateJson';
  import type { PipelineJson } from '../../../types/PipelineJson/PipelineJson';
  import type { string_markdown_text } from '../../../types/typeAliases';
  import type { string_name } from '../../../types/typeAliases';
+ import type { string_promptbook_documentation_url } from '../../../types/typeAliases';
  import type { string_SCREAMING_CASE } from '../../../utils/normalization/normalizeTo_SCREAMING_CASE';
  import type { CommandUsagePlace } from './CommandUsagePlaces';
  export type CommandParser<TCommand extends {
@@ -12,13 +14,16 @@ export type CommandParser<TCommand extends {
  readonly deprecatedNames?: Array<string_name & string_SCREAMING_CASE>;
  readonly usagePlaces: Array<CommandUsagePlace>;
  readonly description: string_markdown_text;
- readonly discussionUrl: `https://github.com/webgptorg/promptbook/discussions/${number | '@@'}`;
+ readonly documentationUrl: string_promptbook_documentation_url;
  readonly examples: Array<string_markdown_text>;
  /**
  * @throws {ParsingError} if the parsing fails
  */
  parse(input: CommandParserInput): TCommand;
- applyToPipelineJson?(pipelineJson: WritableDeep<PipelineJson>, personaCommand: TCommand): void;
+ /**
+ * @@@ Mutated by the command
+ */
+ applyToPipelineJson?(command: TCommand, subjects: ApplyToPipelineJsonSubjects): void;
  };
  export type CommandParserInput = {
  readonly usagePlace: CommandUsagePlace;
@@ -27,10 +32,30 @@ export type CommandParserInput = {
  readonly normalized: string_name & string_SCREAMING_CASE;
  readonly args: Array<string_name & string_SCREAMING_CASE>;
  };
+ /**
+ * @@@ Mutated by the command
+ */
+ export type ApplyToPipelineJsonSubjects = {
+ /**
+ * @@@ Mutated by the command
+ */
+ readonly pipelineJson: WritableDeep<PipelineJson>;
+ /**
+ * @@@
+ *
+ * @@@ Mutated by the command
+ *
+ * When used in
+ * - `PIPELINE_HEAD` it is `null`
+ * - `PIPELINE_TEMPLATE` it is the prompt template
+ */
+ readonly templateJson: null | Partial<WritableDeep<PromptTemplateJson>>;
+ };
  /**
  * TODO: @@@ Annotate all
+ * TODO: [🍧][♓️] Add order here
  * TODO: [🧠] Maybe put flag if it is for whole `.ptbk.md` file of just one section
- * TODO: [🍧] CommandParser should have applyToPipelineJson method
+ * TODO: [🍧] All commands must implement `applyToPipelineJson` method
  * which will apply parsed command to the pipeline JSON
  * it will be called from `pipelineStringToJsonSync`
  * and replace hardcoded switch statement and [💐]
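
For orientation, a minimal sketch of the new `applyToPipelineJson(command, subjects)` contract as a standalone function. The `FooCommand` shape and the `title` mutation are hypothetical illustrations; only `ApplyToPipelineJsonSubjects` and its `pipelineJson`/`templateJson` fields come from the declarations above.

```ts
import type { ApplyToPipelineJsonSubjects } from './CommandParser';

// Hypothetical command shape, used only to illustrate the new contract
type FooCommand = { type: 'FOO'; value: string };

// The parsed command comes first; everything the command may mutate is bundled in `subjects`
function applyToPipelineJson(command: FooCommand, subjects: ApplyToPipelineJsonSubjects): void {
    const { pipelineJson, templateJson } = subjects;

    if (templateJson === null) {
        // Used in PIPELINE_HEAD - only whole-pipeline mutations are possible
        pipelineJson.title = command.value;
        return;
    }

    // Used in PIPELINE_TEMPLATE - the current prompt template can be mutated as well
    templateJson.title = command.value;
}
```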
@@ -10,10 +10,28 @@ export declare const CHARACTER_LOOP_LIMIT = 100000;
  * The maximum number of (LLM) tasks running in parallel
  */
  export declare const MAX_PARALLEL_COUNT = 5;
+ /**
+ * The maximum number of attempts to execute LLM task before giving up
+ */
+ export declare const MAX_EXECUTION_ATTEMPTS = 3;
  /**
  * The maximum length of the (generated) filename
  */
  export declare const MAX_FILENAME_LENGTH = 30;
+ /**
+ * @@@
+ * TODO: [🐝] !!! Use
+ */
+ export declare const MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH = 3;
+ /**
+ * @@@
+ * TODO: [🐝] !!! Use
+ */
+ export declare const MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL = 200;
+ /**
+ * Where to store the cache of executions for promptbook CLI
+ */
+ export declare const EXECUTIONS_CACHE_DIRNAME = "/.promptbook/executions-cache";
  /**
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
  */
@@ -21,8 +39,15 @@ export declare const PIPELINE_COLLECTION_BASE_FILENAME = "index";
  /**
  * The names of the parameters that are reserved for special purposes
  */
- export declare const RESERVED_PARAMETER_NAMES: readonly string[];
+ export declare const RESERVED_PARAMETER_NAMES: readonly ["context", "currentDate"];
+ /**
+ * @@@
+ */
+ export declare const DEBUG_ALLOW_PAYED_TESTING: boolean;
  /**
  * Nonce which is used for replacing things in strings
  */
  export declare const REPLACING_NONCE = "u$k42k%!V2zo34w7Fu#@QUHYPW";
+ /**
+ * TODO: [🔼] Export all to core
+ */
@@ -0,0 +1,4 @@
+ export {};
+ /**
+ * TODO: [🧠] Maybe more elegant how to prevent accidental costs
+ */
@@ -1,5 +1,5 @@
  import type { PromptTemplateJson } from '../../types/PipelineJson/PromptTemplateJson';
- import type { string_name } from '../../types/typeAliases';
+ import type { string_parameter_name } from '../../types/typeAliases';
  /**
  * Parses the prompt template and returns the set of all used parameters
  *
@@ -7,7 +7,7 @@ import type { string_name } from '../../types/typeAliases';
  * @returns the set of parameter names
  * @throws {ParsingError} if the script is invalid
  */
- export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'blockType' | 'content'>): Set<string_name>;
+ export declare function extractParametersFromPromptTemplate(promptTemplate: Pick<PromptTemplateJson, 'title' | 'description' | 'blockType' | 'content' | 'jokerParameterNames'>): Set<string_parameter_name>;
  /**
  * TODO: [🔣] If script require contentLanguage
  */
@@ -7,7 +7,7 @@ import type { string_json } from '../../types/typeAliases';
  */
  export declare function stringifyPipelineJson<TType>(pipeline: TType): string_json<TType>;
  /**
+ * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
- * TODO: [🔼] Export alongside pipelineStringToJson
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
  */
@@ -28,6 +28,9 @@ export declare function validatePipeline(pipeline: PipelineJson): PipelineJson;
  /**
  * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
  * TODO: [🧠][🐣] !!!! Validate that knowledge is valid (non-void)
+ * TODO: [🧠][🐣] !!!! Validate that persona can be used only with CHAT variant
+ * TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+ * TODO: !!!! Validate that reserved parameter is not used as joker
  * TODO: [🧠] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
@@ -1,8 +1,8 @@
  import type { Promisable } from 'type-fest';
+ import { PipelineExecutionError } from '../errors/PipelineExecutionError';
  import type { TaskProgress } from '../types/TaskProgress';
  import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
- import type { string_parameter_name } from '../types/typeAliases';
- import type { string_parameter_value } from '../types/typeAliases';
+ import type { Parameters } from '../types/typeAliases';
  import type { PromptResultUsage } from './PromptResultUsage';
  /**
  * Executor is a simple async function that takes INPUT PARAMETERs and returns result parameters _(along with all intermediate parameters and INPUT PARAMETERs = it extends input object)_.
@@ -13,7 +13,7 @@ import type { PromptResultUsage } from './PromptResultUsage';
  * @see https://github.com/webgptorg/promptbook#executor
  */
  export type PipelineExecutor = {
- (inputParameters: Record<string_parameter_name, string_parameter_value>, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<{
+ (inputParameters: Parameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<{
  /**
  * Whether the execution was successful, details are aviable in `executionReport`
  */
@@ -25,7 +25,7 @@ export type PipelineExecutor = {
  /**
  * Errors that occured during the execution, details are aviable in `executionReport`
  */
- readonly errors: Array<Error>;
+ readonly errors: Array<PipelineExecutionError | Error>;
  /**
  * The report of the execution with all details
  */
@@ -35,7 +35,7 @@ export type PipelineExecutor = {
  *
  * Note: If the execution was not successful, there are only some of the result parameters
  */
- readonly outputParameters: Record<string_parameter_name, string_parameter_value>;
+ readonly outputParameters: Parameters;
  }>;
  };
  /**
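
A hedged sketch of calling a `PipelineExecutor` with the new `Parameters` input and the widened `errors` type; the executor instance and the import paths of the sketch are assumed, only the result fields `errors` and `outputParameters` come from the declarations above.

```ts
import { PipelineExecutionError } from '../errors/PipelineExecutionError';
import type { Parameters } from '../types/typeAliases';
import type { PipelineExecutor } from './PipelineExecutor';

// Assumes `executor` was created elsewhere (for example via `createPipelineExecutor`)
async function runOnce(executor: PipelineExecutor, inputParameters: Parameters) {
    const { errors, outputParameters } = await executor(inputParameters, (taskProgress) => {
        console.info(taskProgress);
    });

    // `errors` is now typed as Array<PipelineExecutionError | Error>
    for (const error of errors) {
        if (error instanceof PipelineExecutionError) {
            console.error('Pipeline failed:', error.message);
        } else {
            console.error('Unexpected error:', error.message);
        }
    }

    // Note: if the execution was not successful, only some of the result parameters are present
    return outputParameters;
}
```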
@@ -10,15 +10,15 @@ export type PromptResultUsage = {
  *
  * Note: If the cost is unknown, the value 0 and isUncertain is true
  */
- price: UncertainNumber;
+ readonly price: UncertainNumber;
  /**
  * Number of whatever used in the input aka. `prompt_tokens`
  */
- input: PromptResultUsageCounts;
+ readonly input: PromptResultUsageCounts;
  /**
  * Number of tokens used in the output aka. `completion_tokens`
  */
- output: PromptResultUsageCounts;
+ readonly output: PromptResultUsageCounts;
  };
  /**
  * Record of all possible measurable units
@@ -1,6 +1,5 @@
  import type { ScriptLanguage } from '../types/ScriptLanguage';
- import type { string_parameter_name } from '../types/typeAliases';
- import type { string_parameter_value } from '../types/typeAliases';
+ import type { Parameters } from '../types/typeAliases';
  import type { string_script } from '../types/typeAliases';
  /**
  * Represents all the tools needed to EXECUTE SCRIPTs
@@ -23,7 +22,7 @@ export type ScriptExecutionToolsExecuteOptions = {
  * Theese parameters are passed to the script as variables
  * For example: { "name": "John" } => const name = "John";
  */
- readonly parameters: Record<string_parameter_name, string_parameter_value>;
+ readonly parameters: Parameters;
  /**
  * The content of the script to execute
  * - It can be a single statement
@@ -5,9 +5,21 @@ type CreatePipelineExecutorSettings = {
  /**
  * When executor does not satisfy expectations it will be retried this amount of times
  *
- * @default 3
+ * @default MAX_EXECUTION_ATTEMPTS
  */
- readonly maxExecutionAttempts: number;
+ readonly maxExecutionAttempts?: number;
+ /**
+ * Maximum number of tasks running in parallel
+ *
+ * @default MAX_PARALLEL_COUNT
+ */
+ readonly maxParallelCount?: number;
+ /**
+ * If true, the preparation logs additional information
+ *
+ * @default false
+ */
+ readonly isVerbose?: boolean;
  };
  /**
  * Options for `createPipelineExecutor`
@@ -35,7 +47,8 @@ interface CreatePipelineExecutorOptions {
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
  export {};
  /**
- * TODO: [🪂] Pass maxParallelCount here
+ * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+ * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
  * TODO: [👧] Strongly type the executors to avoid need of remove nullables whtn noUncheckedIndexedAccess in tsconfig.json
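
A sketch of passing the now-optional settings to `createPipelineExecutor`. The option field names `pipeline`, `tools` and `settings` are assumptions (the full `CreatePipelineExecutorOptions` shape is not visible in this hunk); only the three setting names and their defaults come from the declarations above.

```ts
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { PipelineExecutor } from './PipelineExecutor';
import { createPipelineExecutor } from './createPipelineExecutor';

// Assumed to be provided by the caller; their exact option-field names are not shown in this hunk
declare const pipeline: PipelineJson;
declare const tools: unknown;

const executor: PipelineExecutor = createPipelineExecutor({
    pipeline,
    tools,
    settings: {
        // All three settings are now optional and fall back to the documented defaults
        maxExecutionAttempts: 3, // @default MAX_EXECUTION_ATTEMPTS
        maxParallelCount: 5, // @default MAX_PARALLEL_COUNT
        isVerbose: true, // log additional information during preparation
    },
    // The cast is only because the full CreatePipelineExecutorOptions shape is not part of this hunk
} as never);
```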
@@ -65,5 +65,5 @@ export type FormatDefinition<TValue extends TPartialValue, TPartialValue extends
  * TODO: [🍓][👨‍⚖️] Compute TPartialValue dynamically - PartialString<TValue>
  * TODO: [🍓][🧠] Should execution tools be aviable to heal, canBeValid and isValid?
  * TODO: [🍓][🧠] llm Provider Bindings
- * TODO: [🍓]Export via some package
+ * TODO: [🍓][🔼] Export via some package
  */
@@ -0,0 +1,27 @@
+ import type { Promisable } from 'type-fest';
+ import type { string_file_path } from '../../../types/typeAliases';
+ import type { string_knowledge_source_link } from '../../../types/typeAliases';
+ import type { string_markdown } from '../../../types/typeAliases';
+ import type { string_markdown_text } from '../../../types/typeAliases';
+ import type { string_mime_type } from '../../../types/typeAliases';
+ import type { string_mime_type_with_wildcard } from '../../../types/typeAliases';
+ import type { string_promptbook_documentation_url } from '../../../types/typeAliases';
+ import type { really_unknown } from '../../../utils/organization/really_unknown';
+ export type Scraper = {
+ readonly description: string_markdown_text;
+ readonly documentationUrl: string_promptbook_documentation_url;
+ readonly examples: Array<string_file_path>;
+ readonly mimeTypes: Array<string_mime_type_with_wildcard>;
+ scrape(source: ScraperSourceOptions): Promisable<string_markdown>;
+ };
+ export type ScraperSourceOptions = {
+ readonly source: string_knowledge_source_link;
+ readonly mimeType: string_mime_type;
+ asJson(): Promise<really_unknown>;
+ asText(): Promise<really_unknown>;
+ asBlob(): Promise<really_unknown>;
+ };
+ /**
+ * TODO: [🐝] @@@ Annotate all
+ * TODO: [🔼] Export via types
+ */
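
A minimal sketch of what an implementation of the new `Scraper` contract could look like. The plain-text scraper itself, its example path and the `text/*` mime pattern are hypothetical; only the `Scraper`/`ScraperSourceOptions` members come from the declarations above.

```ts
import type { Scraper, ScraperSourceOptions } from './Scraper';

// Hypothetical scraper that passes any `text/*` source through as markdown
export const plainTextScraper: Scraper = {
    description: `Passes plain-text sources through as markdown`,
    documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
    examples: ['./samples/notes.txt'],
    mimeTypes: ['text/*'],
    async scrape(source: ScraperSourceOptions) {
        // `asText()` is typed as `really_unknown`, so the cast documents the assumption
        const text = (await source.asText()) as string;
        return text;
    },
};
```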
@@ -8,7 +8,7 @@ import type { KnowledgeSourceJson } from '../../../types/PipelineJson/KnowledgeS
  */
  export declare function prepareKnowledgePieces(knowledgeSources: Array<KnowledgeSourceJson>, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'preparationIds'>>>;
  /**
- * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: [🐝][🔼] !!! Export via `@promptbook/core`
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
  * Put `knowledgePieces` into `PrepareKnowledgeOptions`
  * TODO: [🪂] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
@@ -6,7 +6,7 @@ import type { string_markdown } from '../../../types/typeAliases';
  */
  export declare function prepareKnowledgeFromMarkdown(content: string_markdown, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
  /**
- * TODO: [🔼] !!! Export via `@promptbook/markdown`
+ * TODO: [🐝][🔼] !!! Export via `@promptbook/markdown`
  * TODO: [🪂] Do it in parallel 11:11
  * Note: No need to aggregate usage here, it is done by intercepting the llmTools
  */
@@ -6,7 +6,7 @@ import type { string_base64 } from '../../../types/typeAliases';
  */
  export declare function prepareKnowledgeFromPdf(content: string_base64, options: PrepareOptions): Promise<Array<Omit<KnowledgePiecePreparedJson, 'sources' | 'preparationIds'>>>;
  /**
- * TODO: [🐝][🔼] !!! Export via `@promptbook/pdf`
  * TODO: [🧺] In future, content can be alse File or Blob BUT for now for wider compatibility its only base64
  * @see https://stackoverflow.com/questions/14653349/node-js-cant-create-blobs
  * TODO: [🪂] Do it in parallel
@@ -26,7 +26,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  /**
  * Calls Anthropic Claude API to use a chat model.
  */
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
  /**
  * Get the model that should be used as default
  */
@@ -27,11 +27,11 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
  /**
  * Calls OpenAI API to use a chat model.
  */
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
  /**
  * Calls Azure OpenAI API to use a complete model.
  */
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
  /**
  * Changes Azure error (which is not propper Error but object) to propper Error
  */
@@ -18,11 +18,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
  /**
  * Mocks chat model
  */
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
  /**
  * Mocks completion model
  */
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
  /**
  * List all available mocked-models that can be used
  */
@@ -19,15 +19,15 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
  /**
  * Fakes chat model
  */
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<ChatPromptResult & CompletionPromptResult>;
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<ChatPromptResult & CompletionPromptResult>;
  /**
  * Fakes completion model
  */
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<CompletionPromptResult>;
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<CompletionPromptResult>;
  /**
  * Fakes embedding model
  */
- callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
+ callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
  /**
  * List all available fake-models that can be used
  */
@@ -28,15 +28,15 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
  /**
  * Calls OpenAI API to use a chat model.
  */
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<ChatPromptResult>;
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectFormat'>): Promise<ChatPromptResult>;
  /**
  * Calls OpenAI API to use a complete model.
  */
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
  /**
  * Calls OpenAI API to use a embedding model
  */
- callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
+ callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
  /**
  * Get the model that should be used as default
  */
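
Across every provider, `callChatModel`/`callCompletionModel`/`callEmbeddingModel` now also receive the prompt's `parameters`. A hedged sketch of a call; the constructor options (`apiKey`) and the `modelVariant: 'CHAT'` value are assumptions, only the picked prompt fields come from the declarations above.

```ts
import { OpenAiExecutionTools } from './OpenAiExecutionTools';

async function askForCapital() {
    // Constructor options are an assumption (an API key is the usual minimum)
    const tools = new OpenAiExecutionTools({ apiKey: 'sk-...' });

    return await tools.callChatModel({
        content: 'What is the capital of {country}?', // now a template, see the Prompt changes below
        parameters: { country: 'France' }, // newly part of every call*Model prompt
        modelRequirements: { modelVariant: 'CHAT' },
    });
}
```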
@@ -0,0 +1,9 @@
+ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
+ /**
+ * Determine if the pipeline is fully prepared
+ */
+ export declare function isPipelinePrepared(pipeline: PipelineJson): boolean;
+ /**
+ * TODO: [🔼] Export via core or utils
+ * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
+ */
@@ -8,6 +8,7 @@ import type { PrepareOptions } from './PrepareOptions';
  */
  export declare function preparePipeline(pipeline: PipelineJson, options: PrepareOptions): Promise<PipelineJson>;
  /**
+ * TODO: !!!!! Index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
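
A sketch of how the new `isPipelinePrepared` check could gate `preparePipeline`. The shape of `PrepareOptions` is not shown here, so it is passed through untouched; the helper itself is hypothetical.

```ts
import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
import type { PrepareOptions } from './PrepareOptions';
import { isPipelinePrepared } from './isPipelinePrepared';
import { preparePipeline } from './preparePipeline';

// Prepare only when needed; note the [🧊] TODO above - partially prepared pipelines
// may still report `true` until that is resolved
async function ensurePrepared(pipeline: PipelineJson, options: PrepareOptions): Promise<PipelineJson> {
    if (isPipelinePrepared(pipeline)) {
        return pipeline;
    }
    return preparePipeline(pipeline, options);
}
```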
@@ -9,7 +9,7 @@ export type LlmTemplateJson = PromptTemplateJsonCommon & {
  /**
  * Name of the persona who will be responding to this prompt
  */
- readonly personaName: string_name | null;
+ readonly personaName?: string_name;
  /**
  * Requirements for the model
  * - This is required only for blockType PROMPT_TEMPLATE
@@ -17,6 +17,7 @@ export type LlmTemplateJson = PromptTemplateJsonCommon & {
  readonly modelRequirements: ModelRequirements;
  };
  /**
+ * TODO: [🧠][🥜]
  * TODO: [👙][🧠] Maybe add `knowledge`, `actions` and `instruments` to be available granularly for each template
  * @see https://github.com/webgptorg/promptbook/discussions/79
  * TODO: [💕][🧠] Just selecting gpt3 or gpt4 level of model
@@ -79,6 +79,7 @@ export type PipelineJson = {
  readonly preparations: Array<PreparationJson>;
  };
  /**
+ * TODO: [🧠] Maybe wrap all {parameterNames} in brackets for example { "resultingParameterName": "{foo}" }
  * Note: There was a proposal for multiple types of promptbook objects 78816ff33e2705ee1a187aa2eb8affd976d4ea1a
  * But then immediately reverted back to the single type
  * With knowledge as part of the promptbook and collection just as a collection of promptbooks
@@ -7,3 +7,6 @@ import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
  export interface PromptDialogJson extends PromptTemplateJsonCommon {
  readonly blockType: 'PROMPT_DIALOG';
  }
+ /**
+ * TODO: [🧠][🥜]
+ */
@@ -7,3 +7,7 @@ import type { SimpleTemplateJson } from './SimpleTemplateJson';
  * Describes one prompt template in the promptbook
  */
  export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson | ___ | ___ | ___ | ___;
+ /**
+ * TODO: [🧠][🥜] What is propper name for this - "Template", "Prompt template",...
+ * TODO: [🧠][🥜] Reduce confusion of `PromptTemplateJson` vs (`LlmTemplateJson` which is 'PROMPT_TEMPLATE')
+ */
@@ -5,6 +5,7 @@ import type { string_javascript_name } from '../typeAliases';
  import type { string_markdown } from '../typeAliases';
  import type { string_markdown_text } from '../typeAliases';
  import type { string_name } from '../typeAliases';
+ import type { string_parameter_name } from '../typeAliases';
  import type { string_prompt } from '../typeAliases';
  import type { string_template } from '../typeAliases';
  import type { Expectations } from './Expectations';
@@ -34,13 +35,13 @@ export interface PromptTemplateJsonCommon {
  *
  * Note: Joker is one of the dependent parameters
  */
- readonly dependentParameterNames: Array<string_name>;
+ readonly dependentParameterNames: Array<string_parameter_name>;
  /**
  * If theese parameters meet the expectations requirements, they are used instead of executing this prompt template
  *
  * @see https://github.com/webgptorg/promptbook/discussions/66
  */
- readonly jokers?: Array<string>;
+ readonly jokerParameterNames?: Array<string_parameter_name>;
  /**
  * Type of the execution
  * This determines if the prompt template is send to LLM, user or some scripting evaluation
@@ -55,7 +56,7 @@ export interface PromptTemplateJsonCommon {
  *
  * @see https://github.com/webgptorg/promptbook/discussions/31
  */
- readonly postprocessing?: Array<string_javascript_name>;
+ readonly postprocessingFunctionNames?: Array<string_javascript_name>;
  /**
  * Expect this amount of each unit in the answer
  *
@@ -79,6 +80,7 @@ export interface PromptTemplateJsonCommon {
  readonly resultingParameterName: string_name;
  }
  /**
+ * TODO: [🧠][🥜]
  * TODO: use one helper type> (string_prompt | string_javascript | string_markdown) & string_template
  * TODO: [♈] Probbably move expectations from templates to parameters
  */
@@ -5,10 +5,10 @@ import type { CompletionModelRequirements } from './ModelRequirements';
  import type { EmbeddingModelRequirements } from './ModelRequirements';
  import type { ModelRequirements } from './ModelRequirements';
  import type { Expectations } from './PipelineJson/Expectations';
- import type { string_parameter_name } from './typeAliases';
- import type { string_parameter_value } from './typeAliases';
+ import type { Parameters } from './typeAliases';
  import type { string_pipeline_url_with_hashtemplate } from './typeAliases';
  import type { string_prompt } from './typeAliases';
+ import type { string_template } from './typeAliases';
  import type { string_title } from './typeAliases';
  /**
  * Prompt in a text along with model requirements, but without any execution or templating logic.
@@ -63,12 +63,11 @@ export type CommonPrompt = {
  */
  readonly title: string_title;
  /**
- * The text of the prompt
+ * The text of the prompt with placeholders for parameters
  *
- * Note: This is not a template, this is exactly the text that will be sent to the model
- * @example "What is the capital of France?"
+ * @example "What is the capital of {country}?"
  */
- readonly content: string_prompt;
+ readonly content: string_prompt & string_template;
  /**
  * Requirements for the model
  */
@@ -98,11 +97,9 @@ export type CommonPrompt = {
  */
  readonly pipelineUrl?: string_pipeline_url_with_hashtemplate;
  /**
- * Parameters used in the prompt
- *
- * Note: This is redundant (same information is in pipelineUrl+content) but useful for logging and debugging
+ * Parameters used in the `content`
  */
- readonly parameters: Record<string_parameter_name, string_parameter_value>;
+ readonly parameters: Parameters;
  };
  /**
  * TODO: [🔼] !!!! Export all from `@promptbook/types`
@@ -1,3 +1,4 @@
+ import { RESERVED_PARAMETER_NAMES } from '../config';
  /**
  * Semantic helper
  */
@@ -17,7 +18,7 @@ export type string_prompt = string;
  /**
  * Semantic helper
  *
- * For example `"A cat wearing a {ITEM}"`
+ * For example `"A cat wearing a {item}"`
  */
  export type string_template = string;
  /**
@@ -85,14 +86,36 @@ export type string_name = string;
  *
  * For example `"eventName"`
  */
- export type string_parameter_name = string;
+ export type string_parameter_name = string_name;
  /**
  * Semantic helper
- * Unique identifier of anything
+ * Unique identifier of parameter
  *
  * For example `"DevConf 2024"`
  */
  export type string_parameter_value = string;
+ /**
+ * Parameters of the pipeline
+ *
+ * There are three types of parameters:
+ * - **INPUT PARAMETERs** are required to execute the pipeline.
+ * - **Intermediate parameters** are used internally in the pipeline.
+ * - **OUTPUT PARAMETERs** are not used internally in the pipeline, but are returned as the result of the pipeline execution.
+ *
+ * @see https://ptbk.io/parameters
+ */
+ export type Parameters = Exclude<Record<string_parameter_name, string_parameter_value>, ReservedParameters>;
+ /**
+ * Semantic helper
+ * Unique identifier of reserved parameter
+ *
+ * For example `"context"`
+ */
+ export type string_reserved_parameter_name = typeof RESERVED_PARAMETER_NAMES[number];
+ /**
+ * @@@
+ */
+ export type ReservedParameters = Record<string_reserved_parameter_name, string_parameter_value>;
  /**
  * Semantic helper
  * Title of anything
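
A small sketch of the new parameter aliases in use. The concrete values are hypothetical; the reserved names `context` and `currentDate` come from the `RESERVED_PARAMETER_NAMES` declaration earlier in this diff, and the import path mirrors the one added above.

```ts
import { RESERVED_PARAMETER_NAMES } from '../config';
import type { Parameters, ReservedParameters, string_reserved_parameter_name } from './typeAliases';

// Caller-supplied parameters - keys are ordinary string_parameter_name values
const inputParameters: Parameters = {
    country: 'France',
};

// Reserved parameters are keyed by the literal names from RESERVED_PARAMETER_NAMES
const reservedParameters: ReservedParameters = {
    context: 'Knowledge relevant to this prompt',
    currentDate: new Date().toISOString(),
};

// Type-level check: the first reserved name ("context") matches string_reserved_parameter_name
const reservedName: string_reserved_parameter_name = RESERVED_PARAMETER_NAMES[0];
```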
@@ -115,8 +138,21 @@ export type string_persona_description = string;
  * For example `"./pavol-hejny-cv.pdf"`
  * For example `"Pavol Hejný has web https://pavolhejny.com/"`
  * For example `"Pavol Hejný is web developer and creator of Promptbook and Collboard"`
+ *
+ * @@@ string_knowledge_source vs string_knowledge_source_link
+ */
+ export type string_knowledge_source = string_knowledge_source_link | string_markdown;
+ /**
+ * One link to knowledge source
+ *
+ * It can be a link or relative path
+ *
+ * For example `"https://pavolhejny.com/"`
+ * For example `"./pavol-hejny-cv.pdf"`
+ *
+ * @@@ string_knowledge_source vs string_knowledge_source_link
  */
- export type string_knowledge_source = string_url | string_file_path | string;
+ export type string_knowledge_source_link = string_url | string_file_path;
  /**
  * Semantic helper
  *
@@ -160,6 +196,10 @@ export type string_markdown_section_content = string;
  * For example `"**Hello** World!"`
  */
  export type string_markdown_text = string;
+ /**
+ * @@@
+ */
+ export type string_promptbook_documentation_url = `https://github.com/webgptorg/promptbook/discussions/${number | '@@'}`;
  /**
  * Semantic helper
  *