@promptbook/openai 0.66.0-9 → 0.67.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. package/esm/index.es.js +154 -17
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +6 -4
  4. package/esm/typings/src/_packages/types.index.d.ts +7 -1
  5. package/esm/typings/src/_packages/utils.index.d.ts +14 -8
  6. package/esm/typings/src/commands/EXPECT/ExpectFormatCommand.d.ts +2 -0
  7. package/esm/typings/src/config.d.ts +6 -0
  8. package/esm/typings/src/errors/{ReferenceError.d.ts → PipelineUrlError.d.ts} +2 -2
  9. package/esm/typings/src/errors/index.d.ts +27 -0
  10. package/esm/typings/src/errors/utils/ErrorJson.d.ts +20 -0
  11. package/esm/typings/src/errors/utils/deserializeError.d.ts +7 -0
  12. package/esm/typings/src/errors/utils/deserializeError.test.d.ts +1 -0
  13. package/esm/typings/src/errors/utils/serializeError.d.ts +7 -0
  14. package/esm/typings/src/errors/utils/serializeError.test.d.ts +1 -0
  15. package/esm/typings/src/execution/ExecutionTools.d.ts +4 -1
  16. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -47
  17. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +49 -0
  18. package/esm/typings/src/execution/PromptResult.d.ts +5 -4
  19. package/esm/typings/src/execution/PromptResultUsage.d.ts +4 -0
  20. package/esm/typings/src/execution/UncertainNumber.d.ts +1 -0
  21. package/esm/typings/src/execution/assertsExecutionSuccessful.d.ts +2 -2
  22. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +0 -1
  23. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +2 -2
  24. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  26. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Error.d.ts +2 -6
  27. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  28. package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +2 -2
  29. package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +1 -1
  30. package/esm/typings/src/types/ModelRequirements.d.ts +5 -5
  31. package/esm/typings/src/types/PipelineJson/Expectations.d.ts +3 -1
  32. package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +2 -0
  33. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +4 -0
  34. package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -0
  35. package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +4 -0
  36. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +2 -0
  37. package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +1 -0
  38. package/esm/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +2 -0
  39. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +2 -2
  40. package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +2 -0
  41. package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +1 -0
  42. package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +1 -0
  43. package/esm/typings/src/types/Prompt.d.ts +7 -7
  44. package/esm/typings/src/types/ScriptLanguage.d.ts +2 -0
  45. package/esm/typings/src/types/execution-report/ExecutionPromptReportJson.d.ts +24 -0
  46. package/esm/typings/src/types/execution-report/ExecutionReportJson.d.ts +3 -20
  47. package/esm/typings/src/types/typeAliases.d.ts +7 -0
  48. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +1 -4
  49. package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +17 -0
  50. package/esm/typings/src/utils/{deepFreeze.d.ts → serialization/$deepFreeze.d.ts} +0 -10
  51. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +27 -0
  52. package/esm/typings/src/utils/{clonePipeline.d.ts → serialization/clonePipeline.d.ts} +1 -1
  53. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +24 -0
  54. package/esm/typings/src/utils/serialization/isSerializableAsJson.test.d.ts +1 -0
  55. package/package.json +2 -2
  56. package/umd/index.umd.js +154 -17
  57. package/umd/index.umd.js.map +1 -1
  58. package/esm/typings/src/errors/VersionMismatchError.d.ts +0 -10
  59. /package/esm/typings/src/utils/{deepClone.d.ts → serialization/deepClone.d.ts} +0 -0
@@ -4,14 +4,12 @@ import { extractParameterNamesFromPromptTemplate } from '../conversion/utils/ext
4
4
  import { extractVariables } from '../conversion/utils/extractVariables';
5
5
  import { renameParameter } from '../conversion/utils/renameParameter';
6
6
  import { titleToName } from '../conversion/utils/titleToName';
7
+ import { deserializeError } from '../errors/utils/deserializeError';
8
+ import { serializeError } from '../errors/utils/serializeError';
7
9
  import { forEachAsync } from '../execution/utils/forEachAsync';
8
10
  import { isValidJsonString } from '../formats/json/utils/isValidJsonString';
9
11
  import { extractBlock } from '../postprocessing/utils/extractBlock';
10
12
  import { $currentDate } from '../utils/$currentDate';
11
- import { clonePipeline } from '../utils/clonePipeline';
12
- import { deepClone } from '../utils/deepClone';
13
- import { $deepFreeze } from '../utils/deepFreeze';
14
- import { $getGlobalScope } from '../utils/environment/$getGlobalScope';
15
13
  import { $isRunningInBrowser } from '../utils/environment/$isRunningInBrowser';
16
14
  import { $isRunningInNode } from '../utils/environment/$isRunningInNode';
17
15
  import { $isRunningInWebWorker } from '../utils/environment/$isRunningInWebWorker';
@@ -51,6 +49,11 @@ import { $randomSeed } from '../utils/random/$randomSeed';
51
49
  import { removeEmojis } from '../utils/removeEmojis';
52
50
  import { removeQuotes } from '../utils/removeQuotes';
53
51
  import { replaceParameters } from '../utils/replaceParameters';
52
+ import { $deepFreeze } from '../utils/serialization/$deepFreeze';
53
+ import { checkSerializableAsJson } from '../utils/serialization/checkSerializableAsJson';
54
+ import { clonePipeline } from '../utils/serialization/clonePipeline';
55
+ import { deepClone } from '../utils/serialization/deepClone';
56
+ import { isSerializableAsJson } from '../utils/serialization/isSerializableAsJson';
54
57
  import { difference } from '../utils/sets/difference';
55
58
  import { intersection } from '../utils/sets/intersection';
56
59
  import { union } from '../utils/sets/union';
@@ -73,14 +76,12 @@ export { extractParameterNamesFromPromptTemplate };
73
76
  export { extractVariables };
74
77
  export { renameParameter };
75
78
  export { titleToName };
79
+ export { deserializeError };
80
+ export { serializeError };
76
81
  export { forEachAsync };
77
82
  export { isValidJsonString };
78
83
  export { extractBlock };
79
84
  export { $currentDate };
80
- export { clonePipeline };
81
- export { deepClone };
82
- export { $deepFreeze };
83
- export { $getGlobalScope };
84
85
  export { $isRunningInBrowser };
85
86
  export { $isRunningInNode };
86
87
  export { $isRunningInWebWorker };
@@ -120,6 +121,11 @@ export { $randomSeed };
120
121
  export { removeEmojis };
121
122
  export { removeQuotes };
122
123
  export { replaceParameters };
124
+ export { $deepFreeze };
125
+ export { checkSerializableAsJson };
126
+ export { clonePipeline };
127
+ export { deepClone };
128
+ export { isSerializableAsJson };
123
129
  export { difference };
124
130
  export { intersection };
125
131
  export { union };
@@ -1,5 +1,7 @@
1
1
  /**
2
2
  * Represents a command that expects a specific format.
3
+ *
4
+ * Note: [🚉] This is fully serializable as JSON
3
5
  */
4
6
  export type ExpectFormatCommand = {
5
7
  readonly type: 'EXPECT_FORMAT';
@@ -119,6 +119,12 @@ export declare const DEFAULT_REMOTE_URL = "https://api.pavolhejny.com/";
119
119
  * @public exported from `@promptbook/core`
120
120
  */
121
121
  export declare const DEFAULT_REMOTE_URL_PATH = "/promptbook/socket.io";
122
+ /**
123
+ * @@@
124
+ *
125
+ * @public exported from `@promptbook/core`
126
+ */
127
+ export declare const IS_VERBOSE = false;
122
128
  /**
123
129
  * @@@
124
130
  *
@@ -3,7 +3,7 @@
3
3
  *
4
4
  * @public exported from `@promptbook/core`
5
5
  */
6
- export declare class ReferenceError extends Error {
7
- readonly name = "ReferenceError";
6
+ export declare class PipelineUrlError extends Error {
7
+ readonly name = "PipelineUrlError";
8
8
  constructor(message: string);
9
9
  }
@@ -0,0 +1,27 @@
1
+ import { CollectionError } from './CollectionError';
2
+ import { EnvironmentMismatchError } from './EnvironmentMismatchError';
3
+ import { LimitReachedError } from './LimitReachedError';
4
+ import { NotFoundError } from './NotFoundError';
5
+ import { NotYetImplementedError } from './NotYetImplementedError';
6
+ import { ParsingError } from './ParsingError';
7
+ import { PipelineExecutionError } from './PipelineExecutionError';
8
+ import { PipelineLogicError } from './PipelineLogicError';
9
+ import { PipelineUrlError } from './PipelineUrlError';
10
+ import { UnexpectedError } from './UnexpectedError';
11
+ /**
12
+ * Index of all custom errors
13
+ *
14
+ * @public exported from `@promptbook/core`
15
+ */
16
+ export declare const ERRORS: {
17
+ readonly CollectionError: typeof CollectionError;
18
+ readonly EnvironmentMismatchError: typeof EnvironmentMismatchError;
19
+ readonly LimitReachedError: typeof LimitReachedError;
20
+ readonly NotFoundError: typeof NotFoundError;
21
+ readonly NotYetImplementedError: typeof NotYetImplementedError;
22
+ readonly ParsingError: typeof ParsingError;
23
+ readonly PipelineExecutionError: typeof PipelineExecutionError;
24
+ readonly PipelineLogicError: typeof PipelineLogicError;
25
+ readonly PipelineUrlError: typeof PipelineUrlError;
26
+ readonly UnexpectedError: typeof UnexpectedError;
27
+ };
@@ -0,0 +1,20 @@
1
+ import type { ERRORS } from '../index';
2
+ /**
3
+ * Represents a serialized error or custom Promptbook error
4
+ *
5
+ * Note: [🚉] This is fully serializable as JSON
6
+ */
7
+ export type ErrorJson = {
8
+ /**
9
+ * The type of the error
10
+ */
11
+ readonly name: keyof typeof ERRORS | 'Error';
12
+ /**
13
+ * The message of the error
14
+ */
15
+ readonly message: string;
16
+ /**
17
+ * The stack trace of the error
18
+ */
19
+ readonly stack?: string;
20
+ };
@@ -0,0 +1,7 @@
1
+ import type { ErrorJson } from './ErrorJson';
2
+ /**
3
+ * Deserializes the error object
4
+ *
5
+ * @public exported from `@promptbook/utils`
6
+ */
7
+ export declare function deserializeError(error: ErrorJson): Error;
@@ -0,0 +1,7 @@
1
+ import type { ErrorJson } from './ErrorJson';
2
+ /**
3
+ * Serializes an error into a [🚉] JSON-serializable object
4
+ *
5
+ * @public exported from `@promptbook/utils`
6
+ */
7
+ export declare function serializeError(error: Error): ErrorJson;
@@ -11,7 +11,7 @@ export type ExecutionTools = {
11
11
  /**
12
12
  * Tools for executing prompts to large language models like GPT-4
13
13
  *
14
- * Tip: Combine multiple LLM execution tools - use array of LlmExecutionTools instead of single LlmExecutionTools
14
+ * Tip: Use `createLlmToolsFromEnv()` to use all available LLM providers you configured
15
15
  * @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#llm-execution-tools
16
16
  */
17
17
  llm?: Arrayable<LlmExecutionTools>;
@@ -20,7 +20,10 @@ export type ExecutionTools = {
20
20
  *
21
21
  * Note: You can pass multiple ScriptExecutionTools, they will be tried one by one until one of them supports the script
22
22
  * If none of them supports the script, an error is thrown
23
+ * Tip: Use here `new JavascriptExecutionTools()`
24
+ *
23
25
  * @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#script-execution-tools
26
+ * @default [] - If not provided, no script execution will be possible
24
27
  */
25
28
  script?: Arrayable<ScriptExecutionTools>;
26
29
  /**
@@ -1,64 +1,18 @@
1
1
  import type { Promisable } from 'type-fest';
2
- import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
3
- import { PipelineExecutionError } from '../errors/PipelineExecutionError';
4
2
  import type { TaskProgress } from '../types/TaskProgress';
5
- import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
6
3
  import type { Parameters } from '../types/typeAliases';
7
- import type { PromptResultUsage } from './PromptResultUsage';
4
+ import type { PipelineExecutorResult } from './PipelineExecutorResult';
8
5
  /**
9
6
  * Executor is a simple async function that takes INPUT PARAMETERs and returns result parameters _(along with all intermediate parameters and INPUT PARAMETERs = it extends input object)_.
10
7
  * Executor is made by combining execution tools and pipeline collection.
11
8
  *
12
9
  * It can be created with `createPipelineExecutor` function.
13
10
  *
14
- * @@@ almost-JSON (what about errors)
15
- *
16
11
  * @see https://github.com/webgptorg/promptbook#executor
17
12
  */
18
13
  export type PipelineExecutor = {
19
14
  (inputParameters: Parameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
20
15
  };
21
- /**
22
- * @@@
23
- *
24
- * @@@ almost-JSON (what about errors)
25
- */
26
- export type PipelineExecutorResult = {
27
- /**
28
- * Result parameters of the execution
29
- *
30
- * Note: If the execution was not successful, there are only some of the result parameters
31
- */
32
- readonly outputParameters: Parameters;
33
- /**
34
- * Whether the execution was successful, details are aviable in `executionReport`
35
- */
36
- readonly isSuccessful: boolean;
37
- /**
38
- * Added usage of whole execution, detailed usage is aviable in `executionReport`
39
- */
40
- readonly usage: PromptResultUsage;
41
- /**
42
- * Errors that occured during the execution, details are aviable in `executionReport`
43
- */
44
- readonly errors: Array<PipelineExecutionError | Error>;
45
- /**
46
- * Warnings that occured during the execution, details are aviable in `executionReport`
47
- */
48
- readonly warnings: Array<PipelineExecutionError | Error>;
49
- /**
50
- * The report of the execution with all details
51
- */
52
- readonly executionReport: ExecutionReportJson;
53
- /**
54
- * The prepared pipeline that was used for the execution
55
- *
56
- * Note: If you called `createPipelineExecutor` with fully prepared pipeline, this is the same object as this pipeline
57
- * If you passed not fully prepared pipeline, this is same pipeline but fully prepared
58
- */
59
- readonly preparedPipeline: PipelineJson;
60
- };
61
16
  /**
62
17
  * TODO: [🧠] Should this file be in /execution or /types folder?
63
- * TODO: [💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result - BUT maybe NOT?
64
18
  */
@@ -0,0 +1,49 @@
1
+ import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
2
+ import type { Parameters } from '../types/typeAliases';
3
+ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
4
+ import type { ErrorJson } from '../errors/utils/ErrorJson';
5
+ import type { PromptResultUsage } from './PromptResultUsage';
6
+ /**
7
+ * @@@
8
+ *
9
+ * Note: [🚉] This is fully serializable as JSON
10
+ */
11
+ export type PipelineExecutorResult = {
12
+ /**
13
+ * Result parameters of the execution
14
+ *
15
+ * Note: If the execution was not successful, there are only some of the result parameters
16
+ */
17
+ readonly outputParameters: Parameters;
18
+ /**
19
+ * Whether the execution was successful, details are available in `executionReport`
20
+ */
21
+ readonly isSuccessful: boolean;
22
+ /**
23
+ * Added usage of whole execution, detailed usage is available in `executionReport`
24
+ */
25
+ readonly usage: PromptResultUsage;
26
+ /**
27
+ * Errors that occurred during the execution, details are available in `executionReport`
28
+ */
29
+ readonly errors: Array<ErrorJson>;
30
+ /**
31
+ * Warnings that occurred during the execution, details are available in `executionReport`
32
+ */
33
+ readonly warnings: Array<ErrorJson>;
34
+ /**
35
+ * The report of the execution with all details
36
+ */
37
+ readonly executionReport: ExecutionReportJson;
38
+ /**
39
+ * The prepared pipeline that was used for the execution
40
+ *
41
+ * Note: If you called `createPipelineExecutor` with a fully prepared pipeline, this is the same object as that pipeline
42
+ * If you passed a not fully prepared pipeline, this is the same pipeline but fully prepared
43
+ */
44
+ readonly preparedPipeline: PipelineJson;
45
+ };
46
+ /**
47
+ * TODO: [🧠] Should this file be in /execution or /types folder?
48
+ * TODO: [🧠] Maybe constrain `ErrorJson` -> `ErrorJson & { name: 'PipelineExecutionError' | 'Error' }`
49
+ */
@@ -15,19 +15,19 @@ export type PromptResult = CompletionPromptResult | ChatPromptResult | Embedding
15
15
  * Completion prompt result
16
16
  *
17
17
  * Note:It contains only the newly generated text NOT the whole completion
18
- * Note: This is fully serializable as JSON
18
+ * Note: [🚉] This is fully serializable as JSON
19
19
  */
20
20
  export type CompletionPromptResult = CommonPromptResult;
21
21
  /**
22
22
  *Chat prompt result
23
23
  *
24
- * Note: This is fully serializable as JSON
24
+ * Note: [🚉] This is fully serializable as JSON
25
25
  */
26
26
  export type ChatPromptResult = CommonPromptResult & {};
27
27
  /**
28
28
  * Embedding prompt result
29
29
  *
30
- * Note: This is fully serializable as JSON
30
+ * Note: [🚉] This is fully serializable as JSON
31
31
  */
32
32
  export type EmbeddingPromptResult = Omit<CommonPromptResult, 'content'> & {
33
33
  /**
@@ -38,7 +38,7 @@ export type EmbeddingPromptResult = Omit<CommonPromptResult, 'content'> & {
38
38
  /**
39
39
  * Common properties for all prompt results
40
40
  *
41
- * Note: This is fully serializable as JSON
41
+ * Note: [🚉] This is fully serializable as JSON
42
42
  */
43
43
  export type CommonPromptResult = {
44
44
  /**
@@ -90,6 +90,7 @@ export type CommonPromptResult = {
90
90
  readonly rawResponse: TODO_object;
91
91
  };
92
92
  /**
93
+ * TODO: !!!!!! [🚉] Check each provider that rawResponse is fully serializable as JSON
93
94
  * TODO: [🧠] Maybe timing more accurate then seconds?
94
95
  * TODO: [🧠] Should here be link to the prompt?
95
96
  * TODO: [🧠] Maybe type `rawResponse` properly - not onject but OpenAI.result.whatever
@@ -3,6 +3,8 @@ import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
3
3
  import type { UncertainNumber } from './UncertainNumber';
4
4
  /**
5
5
  * Usage statistics for one or many prompt results
6
+ *
7
+ * Note: [🚉] This is fully serializable as JSON
6
8
  */
7
9
  export type PromptResultUsage = {
8
10
  /**
@@ -22,6 +24,8 @@ export type PromptResultUsage = {
22
24
  };
23
25
  /**
24
26
  * Record of all possible measurable units
27
+ *
28
+ * Note: [🚉] This is fully serializable as JSON
25
29
  */
26
30
  export type PromptResultUsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
27
31
  /**
@@ -3,6 +3,7 @@ import type { number_usd } from '../types/typeAliases';
3
3
  /**
4
4
  * Number which can be uncertain
5
5
  *
6
+ * Note: [🚉] This is fully serializable as JSON
6
7
  * Note: If the value is completelly unknown, the value 0 and isUncertain is true
7
8
  * Note: Not using NaN or null because it looses the value which is better to be uncertain then not to be at all
8
9
  */
@@ -1,8 +1,8 @@
1
1
  import type { PipelineExecutor } from './PipelineExecutor';
2
2
  /**
3
- * Asserts that the execution of a promptnook is successful
3
+ * Asserts that the execution of a Promptbook is successful
4
4
  *
5
- * @param executionResult - The partial result of the promptnook execution
5
+ * @param executionResult - The partial result of the Promptbook execution
6
6
  * @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
7
7
  * @public exported from `@promptbook/core`
8
8
  */
@@ -25,5 +25,4 @@ export type CacheItem = {
25
25
  };
26
26
  /**
27
27
  * TODO: [🧠] Should be this exported alongsite `cacheLlmTools` through `@promptbook/utils` OR through `@promptbook/types`
28
- * TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable, fix it
29
28
  */
@@ -1,5 +1,5 @@
1
- import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
2
1
  import type { Expectations } from '../../types/PipelineJson/Expectations';
2
+ import type { string_postprocessing_function_name } from '../../types/typeAliases';
3
3
  /**
4
4
  * Gets the expectations and creates a fake text that meets the expectations
5
5
  *
@@ -9,7 +9,7 @@ import type { Expectations } from '../../types/PipelineJson/Expectations';
9
9
  *
10
10
  * @private internal utility for MockedFackedLlmExecutionTools
11
11
  */
12
- export declare function $fakeTextToExpectations(expectations: Expectations, postprocessing?: Array<PostprocessingFunction>): Promise<string>;
12
+ export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: Array<string_postprocessing_function_name>): Promise<string>;
13
13
  /**
14
14
  * TODO: [💝] Unite object for expecting amount and format - use here also a format
15
15
  */
@@ -29,15 +29,15 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
29
29
  /**
30
30
  * Fakes chat model
31
31
  */
32
- callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<ChatPromptResult & CompletionPromptResult>;
32
+ callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<ChatPromptResult & CompletionPromptResult>;
33
33
  /**
34
34
  * Fakes completion model
35
35
  */
36
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<CompletionPromptResult>;
36
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<CompletionPromptResult>;
37
37
  /**
38
38
  * Fakes embedding model
39
39
  */
40
- callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
40
+ callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<EmbeddingPromptResult>;
41
41
  }
42
42
  /**
43
43
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
@@ -54,6 +54,7 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
54
54
  private callCommonModel;
55
55
  }
56
56
  /**
57
+ * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
57
58
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
58
59
  * TODO: [🍓] Allow to list compatible models with each variant
59
60
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
@@ -1,11 +1,7 @@
1
+ import type { ErrorJson } from '../../../errors/utils/ErrorJson';
1
2
  /**
2
3
  * Socket.io error for remote text generation
3
4
  *
4
5
  * This is sent from server to client when error occurs and stops the process
5
6
  */
6
- export type PromptbookServer_Error = {
7
- /**
8
- * The error message which caused the error
9
- */
10
- readonly errorMessage: string;
11
- };
7
+ export type PromptbookServer_Error = ErrorJson;
@@ -11,6 +11,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
11
11
  */
12
12
  export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
13
13
  /**
14
+ * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
14
15
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
15
16
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
16
17
  * TODO: Handle progress - support streaming
@@ -1,6 +1,6 @@
1
1
  import type { Promisable } from 'type-fest';
2
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
3
- import type { string_javascript_name } from '../../types/typeAliases';
3
+ import type { string_postprocessing_function_name } from '../../types/typeAliases';
4
4
  /**
5
5
  * Options for `JavascriptExecutionTools`
6
6
  */
@@ -15,7 +15,7 @@ export type JavascriptExecutionToolsOptions = CommonExecutionToolsOptions & {
15
15
  * Note: There are also some built-in functions available:
16
16
  * @see ./JavascriptEvalExecutionTools.ts
17
17
  */
18
- functions?: Record<string_javascript_name, PostprocessingFunction>;
18
+ functions?: Record<string_postprocessing_function_name, PostprocessingFunction>;
19
19
  };
20
20
  /**
21
21
  * Function that can be used to postprocess the output of the LLM
@@ -20,5 +20,5 @@ export type PromptbookStorage<TItem> = {
20
20
  removeItem(key: string): Promisable<void>;
21
21
  };
22
22
  /**
23
- * TODO: [🧠][🛫] Constrain `TItem` to JSON-serializable objects only
23
+ * TODO: [💺] Constrain `TItem` to [🚉] JSON-serializable objects only at the type level
24
24
  */
@@ -7,14 +7,14 @@ import type { string_system_message } from './typeAliases';
7
7
  * Abstract way to specify the LLM.
8
8
  * It does not specify the LLM with concrete version itself, only the requirements for the LLM.
9
9
  *
10
- * Note: This is fully serializable as JSON
10
+ * Note: [🚉] This is fully serializable as JSON
11
11
  * @see https://github.com/webgptorg/promptbook#model-requirements
12
12
  */
13
13
  export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | EmbeddingModelRequirements;
14
14
  /**
15
15
  * Model requirements for the completion variant
16
16
  *
17
- * Note: This is fully serializable as JSON
17
+ * Note: [🚉] This is fully serializable as JSON
18
18
  */
19
19
  export type CompletionModelRequirements = CommonModelRequirements & {
20
20
  /**
@@ -25,7 +25,7 @@ export type CompletionModelRequirements = CommonModelRequirements & {
25
25
  /**
26
26
  * Model requirements for the chat variant
27
27
  *
28
- * Note: This is fully serializable as JSON
28
+ * Note: [🚉] This is fully serializable as JSON
29
29
  */
30
30
  export type ChatModelRequirements = CommonModelRequirements & {
31
31
  /**
@@ -40,7 +40,7 @@ export type ChatModelRequirements = CommonModelRequirements & {
40
40
  /**
41
41
  * Model requirements for the embedding variant
42
42
  *
43
- * Note: This is fully serializable as JSON
43
+ * Note: [🚉] This is fully serializable as JSON
44
44
  */
45
45
  export type EmbeddingModelRequirements = CommonModelRequirements & {
46
46
  /**
@@ -51,7 +51,7 @@ export type EmbeddingModelRequirements = CommonModelRequirements & {
51
51
  /**
52
52
  * Common properties for all model requirements variants
53
53
  *
54
- * Note: This is fully serializable as JSON
54
+ * Note: [🚉] This is fully serializable as JSON
55
55
  */
56
56
  export type CommonModelRequirements = {
57
57
  /**
@@ -7,7 +7,7 @@ import type { number_positive } from '../typeAliases';
7
7
  * For example 5 words, 3 sentences, 2 paragraphs, ...
8
8
  *
9
9
  * Note: Expectations are performed after all postprocessing steps
10
- *
10
+ * Note: [🚉] This is fully serializable as JSON
11
11
  * @see https://github.com/webgptorg/promptbook/discussions/30
12
12
  */
13
13
  export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
@@ -17,6 +17,7 @@ export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
17
17
  /**
18
18
  * Unit of text measurement
19
19
  *
20
+ * Note: [🚉] This is fully serializable as JSON
20
21
  * @see https://github.com/webgptorg/promptbook/discussions/30
21
22
  */
22
23
  export type ExpectationUnit = TupleToUnion<typeof EXPECTATION_UNITS>;
@@ -30,6 +31,7 @@ export declare const EXPECTATION_UNITS: readonly ["CHARACTERS", "WORDS", "SENTEN
30
31
  /**
31
32
  * Amount of text measurement
32
33
  *
34
+ * Note: [🚉] This is fully serializable as JSON
33
35
  * @see https://github.com/webgptorg/promptbook/discussions/30
34
36
  */
35
37
  export type ExpectationAmount = number_integer & (number_positive | 0);
@@ -11,6 +11,8 @@ import type { string_name } from '../typeAliases';
11
11
  *
12
12
  * Note: Knowledge piece is by definition prepared
13
13
  *
14
+ * Note: [🚉] This is fully serializable as JSON
15
+ *
14
16
  * @see https://github.com/webgptorg/promptbook/discussions/41
15
17
  */
16
18
  export type KnowledgePiecePreparedJson = {
@@ -5,6 +5,8 @@ import type { string_name } from '../typeAliases';
5
5
  * Defines one source of knowledge in the pipeline
6
6
  * For example, a source of information, a fact, a quote, a definition, website, etc.
7
7
  *
8
+ * Note: [🚉] This is fully serializable as JSON
9
+ *
8
10
  * @see https://github.com/webgptorg/promptbook/discussions/41
9
11
  */
10
12
  export type KnowledgeSourceJson = {
@@ -20,6 +22,8 @@ export type KnowledgeSourceJson = {
20
22
  /**
21
23
  * Defines one source of knowledge in the pipeline after it has been prepared
22
24
  *
25
+ * Note: [🚉] This is fully serializable as JSON
26
+ *
23
27
  * @see https://github.com/webgptorg/promptbook/discussions/41
24
28
  */
25
29
  export type KnowledgeSourcePreparedJson = KnowledgeSourceJson & {
@@ -3,6 +3,8 @@ import type { string_name } from '../typeAliases';
3
3
  import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
4
4
  /**
5
5
  * Template for prompt to LLM
6
+ *
7
+ * Note: [🚉] This is fully serializable as JSON
6
8
  */
7
9
  export type LlmTemplateJson = PromptTemplateJsonCommon & {
8
10
  readonly blockType: 'PROMPT_TEMPLATE';
@@ -5,6 +5,8 @@ import type { string_persona_description } from '../typeAliases';
5
5
  /**
6
6
  * Defines a persona in the pipeline
7
7
  *
8
+ * Note: [🚉] This is fully serializable as JSON
9
+ *
8
10
  * @see https://github.com/webgptorg/promptbook/discussions/22
9
11
  */
10
12
  export type PersonaJson = {
@@ -22,6 +24,8 @@ export type PersonaJson = {
22
24
  /**
23
25
  * Defines a persona in the pipeline after it has been prepared
24
26
  *
27
+ * Note: [🚉] This is fully serializable as JSON
28
+ *
25
29
  * @see https://github.com/webgptorg/promptbook/discussions/22
26
30
  */
27
31
  export type PersonaPreparedJson = PersonaJson & {
@@ -14,6 +14,8 @@ import type { PromptTemplateParameterJson } from './PromptTemplateParameterJson'
14
14
  * Promptbook is the **core concept of this package**.
15
15
  * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
16
16
  *
17
+ * Note: [🚉] This is fully serializable as JSON
18
+ *
17
19
  * @see @@@ https://github.com/webgptorg/promptbook#promptbook
18
20
  */
19
21
  export type PipelineJson = {
@@ -2,6 +2,7 @@ import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
2
2
  /**
3
3
  * Template for prompt to user
4
4
  *
5
+ * Note: [🚉] This is fully serializable as JSON
5
6
  * @see https://github.com/webgptorg/promptbook/discussions/76
6
7
  */
7
8
  export type PromptDialogJson = PromptTemplateJsonCommon & {
@@ -5,6 +5,8 @@ import type { ScriptJson } from './ScriptJson';
5
5
  import type { SimpleTemplateJson } from './SimpleTemplateJson';
6
6
  /**
7
7
  * Describes one prompt template in the promptbook
8
+ *
9
+ * Note: [🚉] This is fully serializable as JSON
8
10
  */
9
11
  export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson | ___ | ___ | ___ | ___;
10
12
  /**