@promptbook/browser 0.66.0-9 → 0.67.0-0
- package/esm/index.es.js +218 -3
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +6 -4
- package/esm/typings/src/_packages/types.index.d.ts +7 -1
- package/esm/typings/src/_packages/utils.index.d.ts +14 -8
- package/esm/typings/src/commands/EXPECT/ExpectFormatCommand.d.ts +2 -0
- package/esm/typings/src/config.d.ts +6 -0
- package/esm/typings/src/errors/{ReferenceError.d.ts → PipelineUrlError.d.ts} +2 -2
- package/esm/typings/src/errors/index.d.ts +27 -0
- package/esm/typings/src/errors/utils/ErrorJson.d.ts +20 -0
- package/esm/typings/src/errors/utils/deserializeError.d.ts +7 -0
- package/esm/typings/src/errors/utils/deserializeError.test.d.ts +1 -0
- package/esm/typings/src/errors/utils/serializeError.d.ts +7 -0
- package/esm/typings/src/errors/utils/serializeError.test.d.ts +1 -0
- package/esm/typings/src/execution/ExecutionTools.d.ts +4 -1
- package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -47
- package/esm/typings/src/execution/PipelineExecutorResult.d.ts +49 -0
- package/esm/typings/src/execution/PromptResult.d.ts +5 -4
- package/esm/typings/src/execution/PromptResultUsage.d.ts +4 -0
- package/esm/typings/src/execution/UncertainNumber.d.ts +1 -0
- package/esm/typings/src/execution/assertsExecutionSuccessful.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +0 -1
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +2 -2
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Error.d.ts +2 -6
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
- package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +1 -1
- package/esm/typings/src/types/ModelRequirements.d.ts +5 -5
- package/esm/typings/src/types/PipelineJson/Expectations.d.ts +3 -1
- package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +4 -0
- package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +4 -0
- package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +2 -2
- package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +2 -0
- package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +1 -0
- package/esm/typings/src/types/Prompt.d.ts +7 -7
- package/esm/typings/src/types/ScriptLanguage.d.ts +2 -0
- package/esm/typings/src/types/execution-report/ExecutionPromptReportJson.d.ts +24 -0
- package/esm/typings/src/types/execution-report/ExecutionReportJson.d.ts +3 -20
- package/esm/typings/src/types/typeAliases.d.ts +7 -0
- package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +1 -4
- package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +17 -0
- package/esm/typings/src/utils/{deepFreeze.d.ts → serialization/$deepFreeze.d.ts} +0 -10
- package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +27 -0
- package/esm/typings/src/utils/{clonePipeline.d.ts → serialization/clonePipeline.d.ts} +1 -1
- package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +24 -0
- package/esm/typings/src/utils/serialization/isSerializableAsJson.test.d.ts +1 -0
- package/package.json +2 -2
- package/umd/index.umd.js +225 -6
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/errors/VersionMismatchError.d.ts +0 -10
- /package/esm/typings/src/utils/{deepClone.d.ts → serialization/deepClone.d.ts} +0 -0
@@ -0,0 +1 @@
+export {};
@@ -11,7 +11,7 @@ export type ExecutionTools = {
 /**
  * Tools for executing prompts to large language models like GPT-4
  *
- * Tip:
+ * Tip: Use `createLlmToolsFromEnv()` to use all available LLM providers you configured
  * @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#llm-execution-tools
  */
 llm?: Arrayable<LlmExecutionTools>;
@@ -20,7 +20,10 @@ export type ExecutionTools = {
  *
  * Note: You can pass multiple ScriptExecutionTools, they will be tried one by one until one of them supports the script
  * If none of them supports the script, an error is thrown
+ * Tip: Use here `new JavascriptExecutionTools()`
+ *
  * @see https://github.com/webgptorg/promptbook/?tab=readme-ov-file#script-execution-tools
+ * @default [] - If not provided, no script execution will be possible
  */
 script?: Arrayable<ScriptExecutionTools>;
 /**
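For orientation, here is a minimal sketch of how the two new tips could be combined when wiring up `ExecutionTools`. The entry points in the imports and the exact return type of `createLlmToolsFromEnv()` are assumptions; only the function and class names come from the comments in this hunk.

    import { createLlmToolsFromEnv } from '@promptbook/node'; // assumed entry point
    import { JavascriptExecutionTools } from '@promptbook/execute-javascript'; // assumed entry point
    import type { ExecutionTools } from '@promptbook/types'; // assumed entry point

    // LLM providers are picked up from environment variables (first tip),
    // scripts run through the built-in JavaScript tools (second tip).
    export const tools: ExecutionTools = {
        llm: createLlmToolsFromEnv(),
        script: [new JavascriptExecutionTools()],
    };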
@@ -1,64 +1,18 @@
 import type { Promisable } from 'type-fest';
-import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
-import { PipelineExecutionError } from '../errors/PipelineExecutionError';
 import type { TaskProgress } from '../types/TaskProgress';
-import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
 import type { Parameters } from '../types/typeAliases';
-import type {
+import type { PipelineExecutorResult } from './PipelineExecutorResult';
 /**
  * Executor is a simple async function that takes INPUT PARAMETERs and returns result parameters _(along with all intermediate parameters and INPUT PARAMETERs = it extends input object)_.
  * Executor is made by combining execution tools and pipeline collection.
  *
  * It can be created with `createPipelineExecutor` function.
  *
- * @@@ almost-JSON (what about errors)
- *
  * @see https://github.com/webgptorg/promptbook#executor
  */
 export type PipelineExecutor = {
 (inputParameters: Parameters, onProgress?: (taskProgress: TaskProgress) => Promisable<void>): Promise<PipelineExecutorResult>;
 };
-/**
- * @@@
- *
- * @@@ almost-JSON (what about errors)
- */
-export type PipelineExecutorResult = {
-/**
- * Result parameters of the execution
- *
- * Note: If the execution was not successful, there are only some of the result parameters
- */
-readonly outputParameters: Parameters;
-/**
- * Whether the execution was successful, details are aviable in `executionReport`
- */
-readonly isSuccessful: boolean;
-/**
- * Added usage of whole execution, detailed usage is aviable in `executionReport`
- */
-readonly usage: PromptResultUsage;
-/**
- * Errors that occured during the execution, details are aviable in `executionReport`
- */
-readonly errors: Array<PipelineExecutionError | Error>;
-/**
- * Warnings that occured during the execution, details are aviable in `executionReport`
- */
-readonly warnings: Array<PipelineExecutionError | Error>;
-/**
- * The report of the execution with all details
- */
-readonly executionReport: ExecutionReportJson;
-/**
- * The prepared pipeline that was used for the execution
- *
- * Note: If you called `createPipelineExecutor` with fully prepared pipeline, this is the same object as this pipeline
- * If you passed not fully prepared pipeline, this is same pipeline but fully prepared
- */
-readonly preparedPipeline: PipelineJson;
-};
 /**
  * TODO: [🧠] Should this file be in /execution or /types folder?
- * TODO: [💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result - BUT maybe NOT?
  */
@@ -0,0 +1,49 @@
+import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
+import type { Parameters } from '../types/typeAliases';
+import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
+import type { ErrorJson } from '../errors/utils/ErrorJson';
+import type { PromptResultUsage } from './PromptResultUsage';
+/**
+ * @@@
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+export type PipelineExecutorResult = {
+/**
+ * Result parameters of the execution
+ *
+ * Note: If the execution was not successful, there are only some of the result parameters
+ */
+readonly outputParameters: Parameters;
+/**
+ * Whether the execution was successful, details are aviable in `executionReport`
+ */
+readonly isSuccessful: boolean;
+/**
+ * Added usage of whole execution, detailed usage is aviable in `executionReport`
+ */
+readonly usage: PromptResultUsage;
+/**
+ * Errors that occured during the execution, details are aviable in `executionReport`
+ */
+readonly errors: Array<ErrorJson>;
+/**
+ * Warnings that occured during the execution, details are aviable in `executionReport`
+ */
+readonly warnings: Array<ErrorJson>;
+/**
+ * The report of the execution with all details
+ */
+readonly executionReport: ExecutionReportJson;
+/**
+ * The prepared pipeline that was used for the execution
+ *
+ * Note: If you called `createPipelineExecutor` with fully prepared pipeline, this is the same object as this pipeline
+ * If you passed not fully prepared pipeline, this is same pipeline but fully prepared
+ */
+readonly preparedPipeline: PipelineJson;
+};
+/**
+ * TODO: [🧠] Should this file be in /execution or /types folder?
+ * TODO: [🧠] Maybe constrain `ErrorJson` -> `ErrorJson & { name: 'PipelineExecutionError' | 'Error' }`
+ */
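A hedged usage sketch of the new result shape. `createPipelineExecutor` is named in the doc comments, but its options object and the entry points below are assumptions; the destructured members are exactly the ones declared in this hunk.

    import { createPipelineExecutor } from '@promptbook/core'; // assumed entry point
    import type { ExecutionTools } from '@promptbook/types'; // assumed entry point
    import type { PipelineJson } from '@promptbook/types'; // assumed entry point

    declare const pipeline: PipelineJson; // prepared elsewhere
    declare const tools: ExecutionTools; // e.g. the ExecutionTools sketch earlier in this diff

    export async function run(): Promise<void> {
        // The options shape of `createPipelineExecutor` is assumed here
        const pipelineExecutor = createPipelineExecutor({ pipeline, tools });
        const result = await pipelineExecutor({ topic: 'Promptbook' });

        // Per the [🚉] note, the whole result is plain JSON-serializable data
        const { isSuccessful, outputParameters, errors } = result;
        if (!isSuccessful) {
            // `errors` are serialized ErrorJson objects now, not Error instances
            console.error(errors);
            return;
        }
        console.info(outputParameters);
    }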
@@ -15,19 +15,19 @@ export type PromptResult = CompletionPromptResult | ChatPromptResult | Embedding
  * Completion prompt result
  *
  * Note:It contains only the newly generated text NOT the whole completion
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CompletionPromptResult = CommonPromptResult;
 /**
  *Chat prompt result
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type ChatPromptResult = CommonPromptResult & {};
 /**
  * Embedding prompt result
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type EmbeddingPromptResult = Omit<CommonPromptResult, 'content'> & {
 /**
@@ -38,7 +38,7 @@ export type EmbeddingPromptResult = Omit<CommonPromptResult, 'content'> & {
 /**
  * Common properties for all prompt results
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CommonPromptResult = {
 /**
@@ -90,6 +90,7 @@ export type CommonPromptResult = {
 readonly rawResponse: TODO_object;
 };
 /**
+ * TODO: !!!!!! [🚉] Check each provider that rawResponse is fully serializable as JSON
  * TODO: [🧠] Maybe timing more accurate then seconds?
  * TODO: [🧠] Should here be link to the prompt?
  * TODO: [🧠] Maybe type `rawResponse` properly - not onject but OpenAI.result.whatever
@@ -3,6 +3,8 @@ import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
 import type { UncertainNumber } from './UncertainNumber';
 /**
  * Usage statistics for one or many prompt results
+ *
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type PromptResultUsage = {
 /**
@@ -22,6 +24,8 @@ export type PromptResultUsage = {
 };
 /**
  * Record of all possible measurable units
+ *
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type PromptResultUsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
 /**
@@ -3,6 +3,7 @@ import type { number_usd } from '../types/typeAliases';
 /**
  * Number which can be uncertain
  *
+ * Note: [🚉] This is fully serializable as JSON
  * Note: If the value is completelly unknown, the value 0 and isUncertain is true
  * Note: Not using NaN or null because it looses the value which is better to be uncertain then not to be at all
  */
@@ -1,8 +1,8 @@
 import type { PipelineExecutor } from './PipelineExecutor';
 /**
- * Asserts that the execution of a
+ * Asserts that the execution of a Promptbook is successful
  *
- * @param executionResult - The partial result of the
+ * @param executionResult - The partial result of the Promptbook execution
  * @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
  * @public exported from `@promptbook/core`
  */
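The completed doc comment suggests the usual pattern: run the executor, then let the assertion throw on failure. The `@promptbook/core` entry point is taken from the `@public` tag above; the result type import is an assumption.

    import { assertsExecutionSuccessful } from '@promptbook/core'; // per the `@public exported from @promptbook/core` tag
    import type { PipelineExecutorResult } from '@promptbook/types'; // assumed entry point

    declare const result: PipelineExecutorResult; // obtained from a PipelineExecutor call, see the earlier sketch

    // Throws PipelineExecutionError when the execution was not successful or multiple errors occurred
    assertsExecutionSuccessful(result);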
@@ -25,5 +25,4 @@ export type CacheItem = {
 };
 /**
  * TODO: [🧠] Should be this exported alongsite `cacheLlmTools` through `@promptbook/utils` OR through `@promptbook/types`
- * TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable, fix it
  */
@@ -1,5 +1,5 @@
-import type { PostprocessingFunction } from '../../scripting/javascript/JavascriptExecutionToolsOptions';
 import type { Expectations } from '../../types/PipelineJson/Expectations';
+import type { string_postprocessing_function_name } from '../../types/typeAliases';
 /**
  * Gets the expectations and creates a fake text that meets the expectations
  *
@@ -9,7 +9,7 @@ import type { Expectations } from '../../types/PipelineJson/Expectations';
  *
  * @private internal utility for MockedFackedLlmExecutionTools
  */
-export declare function $fakeTextToExpectations(expectations: Expectations,
+export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: Array<string_postprocessing_function_name>): Promise<string>;
 /**
  * TODO: [💝] Unite object for expecting amount and format - use here also a format
  */
@@ -29,15 +29,15 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
 /**
  * Fakes chat model
  */
-callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | '
+callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<ChatPromptResult & CompletionPromptResult>;
 /**
  * Fakes completion model
  */
-callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | '
+callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<CompletionPromptResult>;
 /**
  * Fakes embedding model
  */
-callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | '
+callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessingFunctionNames'>): Promise<EmbeddingPromptResult>;
 }
 /**
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
@@ -54,6 +54,7 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
 private callCommonModel;
 }
 /**
+ * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
@@ -1,11 +1,7 @@
+import type { ErrorJson } from '../../../errors/utils/ErrorJson';
 /**
  * Socket.io error for remote text generation
  *
  * This is sent from server to client when error occurs and stops the process
  */
-export type PromptbookServer_Error =
-/**
- * The error message which caused the error
- */
-readonly errorMessage: string;
-};
+export type PromptbookServer_Error = ErrorJson;
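`PromptbookServer_Error` is now just an `ErrorJson`, which pairs with the new `serializeError`/`deserializeError` utilities listed at the top of this diff. Their signatures are not shown anywhere here, so the round trip below is an assumption based only on the file names.

    import { deserializeError, serializeError } from '@promptbook/utils'; // assumed entry point and signatures

    // Server side (assumed): turn a thrown Error into plain JSON that can cross
    // the socket as a PromptbookServer_Error
    const errorJson = serializeError(new Error('Remote text generation failed'));

    // Client side (assumed): rebuild a throwable Error from the received JSON
    throw deserializeError(errorJson);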
@@ -11,6 +11,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  */
 export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
 /**
+ * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
@@ -1,6 +1,6 @@
 import type { Promisable } from 'type-fest';
 import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
-import type {
+import type { string_postprocessing_function_name } from '../../types/typeAliases';
 /**
  * Options for `JavascriptExecutionTools`
  */
@@ -15,7 +15,7 @@ export type JavascriptExecutionToolsOptions = CommonExecutionToolsOptions & {
  * Note: There are also some built-in functions available:
  * @see ./JavascriptEvalExecutionTools.ts
  */
-functions?: Record<
+functions?: Record<string_postprocessing_function_name, PostprocessingFunction>;
 };
 /**
  * Function that can be used to postprocess the output of the LLM
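With `functions` now keyed by `string_postprocessing_function_name`, custom postprocessors are registered under plain string names that templates can reference through `postprocessingFunctionNames`. A sketch, assuming the options object is passed to the `JavascriptExecutionTools` constructor and that `PostprocessingFunction` is roughly string-in/string-out (both truncated or not shown in this diff):

    import { JavascriptExecutionTools } from '@promptbook/execute-javascript'; // assumed entry point

    export const script = new JavascriptExecutionTools({
        functions: {
            // Key = a string_postprocessing_function_name that templates reference;
            // the (content: string) => string shape of PostprocessingFunction is an assumption
            trimAndUppercase: (content: string) => content.trim().toUpperCase(),
        },
    });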
@@ -7,14 +7,14 @@ import type { string_system_message } from './typeAliases';
  * Abstract way to specify the LLM.
  * It does not specify the LLM with concrete version itself, only the requirements for the LLM.
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook#model-requirements
  */
 export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | EmbeddingModelRequirements;
 /**
  * Model requirements for the completion variant
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CompletionModelRequirements = CommonModelRequirements & {
 /**
@@ -25,7 +25,7 @@ export type CompletionModelRequirements = CommonModelRequirements & {
 /**
  * Model requirements for the chat variant
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type ChatModelRequirements = CommonModelRequirements & {
 /**
@@ -40,7 +40,7 @@ export type ChatModelRequirements = CommonModelRequirements & {
 /**
  * Model requirements for the embedding variant
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type EmbeddingModelRequirements = CommonModelRequirements & {
 /**
@@ -51,7 +51,7 @@ export type EmbeddingModelRequirements = CommonModelRequirements & {
 /**
  * Common properties for all model requirements variants
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CommonModelRequirements = {
 /**
@@ -7,7 +7,7 @@ import type { number_positive } from '../typeAliases';
  * For example 5 words, 3 sentences, 2 paragraphs, ...
  *
  * Note: Expectations are performed after all postprocessing steps
- *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/30
  */
 export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
@@ -17,6 +17,7 @@ export type Expectations = Partial<Record<Lowercase<ExpectationUnit>, {
 /**
  * Unit of text measurement
  *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/30
  */
 export type ExpectationUnit = TupleToUnion<typeof EXPECTATION_UNITS>;
@@ -30,6 +31,7 @@ export declare const EXPECTATION_UNITS: readonly ["CHARACTERS", "WORDS", "SENTEN
 /**
  * Amount of text measurement
  *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/30
  */
 export type ExpectationAmount = number_integer & (number_positive | 0);
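The per-unit body of `Expectations` is cut off in this hunk; assuming it carries optional `min`/`max` values of type `ExpectationAmount`, an expectations object keyed by `Lowercase<ExpectationUnit>` could look like:

    import type { Expectations } from '@promptbook/types'; // assumed entry point

    // The { min?, max? } shape is an assumption, the record body is truncated in this diff
    export const expectations: Expectations = {
        words: { min: 10, max: 50 },
        sentences: { max: 3 },
    };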
@@ -11,6 +11,8 @@ import type { string_name } from '../typeAliases';
  *
  * Note: Knowledge piece is by definition prepared
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see https://github.com/webgptorg/promptbook/discussions/41
  */
 export type KnowledgePiecePreparedJson = {
@@ -5,6 +5,8 @@ import type { string_name } from '../typeAliases';
  * Defines one source of knowledge in the pipeline
  * For example, a source of information, a fact, a quote, a definition, website, etc.
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see https://github.com/webgptorg/promptbook/discussions/41
  */
 export type KnowledgeSourceJson = {
@@ -20,6 +22,8 @@ export type KnowledgeSourceJson = {
 /**
  * Defines one source of knowledge in the pipeline after it has been prepared
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see https://github.com/webgptorg/promptbook/discussions/41
  */
 export type KnowledgeSourcePreparedJson = KnowledgeSourceJson & {
@@ -3,6 +3,8 @@ import type { string_name } from '../typeAliases';
 import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
 /**
  * Template for prompt to LLM
+ *
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type LlmTemplateJson = PromptTemplateJsonCommon & {
 readonly blockType: 'PROMPT_TEMPLATE';
@@ -5,6 +5,8 @@ import type { string_persona_description } from '../typeAliases';
 /**
  * Defines a persona in the pipeline
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see https://github.com/webgptorg/promptbook/discussions/22
  */
 export type PersonaJson = {
@@ -22,6 +24,8 @@ export type PersonaJson = {
 /**
  * Defines a persona in the pipeline after it has been prepared
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see https://github.com/webgptorg/promptbook/discussions/22
  */
 export type PersonaPreparedJson = PersonaJson & {
@@ -14,6 +14,8 @@ import type { PromptTemplateParameterJson } from './PromptTemplateParameterJson'
  * Promptbook is the **core concept of this package**.
  * It represents a series of prompt templates chained together to form a pipeline / one big prompt template with input and result parameters.
  *
+ * Note: [🚉] This is fully serializable as JSON
+ *
  * @see @@@ https://github.com/webgptorg/promptbook#promptbook
  */
 export type PipelineJson = {
@@ -2,6 +2,7 @@ import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
 /**
  * Template for prompt to user
  *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/76
  */
 export type PromptDialogJson = PromptTemplateJsonCommon & {
@@ -5,6 +5,8 @@ import type { ScriptJson } from './ScriptJson';
 import type { SimpleTemplateJson } from './SimpleTemplateJson';
 /**
  * Describes one prompt template in the promptbook
+ *
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type PromptTemplateJson = LlmTemplateJson | SimpleTemplateJson | ScriptJson | PromptDialogJson | ___ | ___ | ___ | ___;
 /**
@@ -1,11 +1,11 @@
 import type { BlockType } from '../../commands/BLOCK/BlockTypes';
 import type { ExpectFormatCommand } from '../../commands/EXPECT/ExpectFormatCommand';
 import type { string_javascript } from '../typeAliases';
-import type { string_javascript_name } from '../typeAliases';
 import type { string_markdown } from '../typeAliases';
 import type { string_markdown_text } from '../typeAliases';
 import type { string_name } from '../typeAliases';
 import type { string_parameter_name } from '../typeAliases';
+import type { string_postprocessing_function_name } from '../typeAliases';
 import type { string_prompt } from '../typeAliases';
 import type { string_template } from '../typeAliases';
 import type { Expectations } from './Expectations';
@@ -66,7 +66,7 @@ export interface PromptTemplateJsonCommon {
  *
  * @see https://github.com/webgptorg/promptbook/discussions/31
  */
-readonly postprocessingFunctionNames?: Array<
+readonly postprocessingFunctionNames?: Array<string_postprocessing_function_name>;
 /**
  * Expect this amount of each unit in the answer
  *
@@ -3,6 +3,8 @@ import type { string_parameter_name } from '../typeAliases';
 import type { string_parameter_value } from '../typeAliases';
 /**
  * Describes one parameter of the promptbook
+ *
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type PromptTemplateParameterJson = {
 /**
@@ -3,6 +3,7 @@ import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
 /**
  * Template for script execution
  *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/77
  */
 export type ScriptJson = PromptTemplateJsonCommon & {
@@ -2,6 +2,7 @@ import type { PromptTemplateJsonCommon } from './PromptTemplateJsonCommon';
 /**
  * Template for simple concatenation of strings
  *
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook/discussions/17
  */
 export type SimpleTemplateJson = PromptTemplateJsonCommon & {
@@ -1,5 +1,4 @@
 import type { ExpectFormatCommand } from '../commands/EXPECT/ExpectFormatCommand';
-import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
 import type { ChatModelRequirements } from './ModelRequirements';
 import type { CompletionModelRequirements } from './ModelRequirements';
 import type { EmbeddingModelRequirements } from './ModelRequirements';
@@ -7,20 +6,21 @@ import type { ModelRequirements } from './ModelRequirements';
 import type { Expectations } from './PipelineJson/Expectations';
 import type { Parameters } from './typeAliases';
 import type { string_pipeline_url_with_hashtemplate } from './typeAliases';
+import type { string_postprocessing_function_name } from './typeAliases';
 import type { string_prompt } from './typeAliases';
 import type { string_template } from './typeAliases';
 import type { string_title } from './typeAliases';
 /**
  * Prompt in a text along with model requirements, but without any execution or templating logic.
  *
- * Note: [
+ * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook#prompt
  */
 export type Prompt = CompletionPrompt | ChatPrompt | EmbeddingPrompt;
 /**
  * Completion prompt
  *
- * Note: [
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CompletionPrompt = CommonPrompt & {
 /**
@@ -31,7 +31,7 @@ export type CompletionPrompt = CommonPrompt & {
 /**
  * Chat prompt
  *
- * Note: [
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type ChatPrompt = CommonPrompt & {
 /**
@@ -42,7 +42,7 @@ export type ChatPrompt = CommonPrompt & {
 /**
  * Embedding prompt
  *
- * Note: [
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type EmbeddingPrompt = CommonPrompt & {
 /**
@@ -53,7 +53,7 @@ export type EmbeddingPrompt = CommonPrompt & {
 /**
  * Common properties for all prompt results
  *
- * Note: This is fully serializable as JSON
+ * Note: [🚉] This is fully serializable as JSON
  */
 export type CommonPrompt = {
 /**
@@ -75,7 +75,7 @@ export type CommonPrompt = {
 /**
  * List of postprocessing steps that are executed after the prompt
  */
-readonly
+readonly postprocessingFunctionNames?: Array<string_postprocessing_function_name>;
 /**
  * Expectations for the answer
  *
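Because `postprocessingFunctionNames` replaced function values on `CommonPrompt`, a prompt now carries only names and survives `JSON.stringify`, matching the [🚉] notes added throughout this hunk. A hypothetical prompt object; field names beyond the ones visible in this diff are assumptions.

    // Only the *names* of postprocessing functions travel with the prompt,
    // so there are no function values left to break serialization.
    export const examplePrompt = {
        title: 'Greeting',
        content: 'Write a short greeting for {name}',
        parameters: { name: 'Alice' },
        postprocessingFunctionNames: ['trimAndUppercase'], // registered in the JavascriptExecutionTools sketch above
        expectations: { words: { max: 20 } },
    };

    console.info(JSON.stringify(examplePrompt)); // plain JSON, nothing is lost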
@@ -0,0 +1,24 @@
+import type { ErrorJson } from '../../errors/utils/ErrorJson';
+import type { PromptResult } from '../../execution/PromptResult';
+import type { Prompt } from '../Prompt';
+/**
+ * Report of single prompt execution
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+export type ExecutionPromptReportJson = {
+/**
+ * The prompt wich was executed
+ */
+readonly prompt: Omit<Prompt, 'pipelineUrl'>;
+/**
+ * Result of the prompt execution (if not failed during LLM execution)
+ */
+readonly result?: PromptResult;
+/**
+ * The error which occured during LLM execution or during postprocessing or expectation checking
+ *
+ * Note: It makes sense to have both error and result defined, for example when the result not pass expectations
+ */
+readonly error?: ErrorJson;
+};