@promptbook/openai 0.61.0-13 → 0.61.0-14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +31 -10
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +6 -3
- package/esm/typings/src/commands/MODEL/ModelCommand.d.ts +2 -2
- package/esm/typings/src/config.d.ts +4 -0
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +13 -0
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.test.d.ts +1 -0
- package/esm/typings/src/conversion/validation/_importPipeline.d.ts +10 -1
- package/esm/typings/src/conversion/validation/validatePipeline.d.ts +1 -1
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +7 -7
- package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
- package/esm/typings/src/execution/PromptResult.d.ts +14 -56
- package/esm/typings/src/execution/PromptResultUsage.d.ts +26 -0
- package/esm/typings/src/execution/UncertainNumber.d.ts +18 -0
- package/esm/typings/src/execution/utils/addUsage.d.ts +1 -1
- package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +1 -1
- package/esm/typings/src/execution/utils/uncertainNumber.d.ts +1 -1
- package/esm/typings/src/execution/utils/usageToWorktime.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +10 -2
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +2 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-cost/limitTotalCost.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +6 -6
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -7
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +6 -6
- package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +9 -7
- package/esm/typings/src/scripting/javascript/utils/unknownToString.d.ts +2 -1
- package/esm/typings/src/types/ModelRequirements.d.ts +53 -14
- package/esm/typings/src/types/ModelVariant.d.ts +14 -0
- package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +2 -4
- package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
- package/esm/typings/src/types/Prompt.d.ts +45 -1
- package/esm/typings/src/types/typeAliases.d.ts +11 -0
- package/esm/typings/src/utils/normalization/parseKeywords.d.ts +2 -1
- package/esm/typings/src/utils/organization/{TODO.d.ts → TODO_any.d.ts} +1 -1
- package/esm/typings/src/utils/organization/TODO_object.d.ts +6 -0
- package/esm/typings/src/utils/organization/TODO_unknown.d.ts +6 -0
- package/esm/typings/src/utils/organization/really_any.d.ts +1 -1
- package/esm/typings/src/utils/organization/really_unknown.d.ts +6 -0
- package/esm/typings/src/utils/validators/email/isValidEmail.d.ts +2 -1
- package/esm/typings/src/utils/validators/filePath/isValidFilePath.d.ts +2 -1
- package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +2 -1
- package/esm/typings/src/utils/validators/semanticVersion/isValidPromptbookVersion.d.ts +2 -1
- package/esm/typings/src/utils/validators/semanticVersion/isValidSemanticVersion.d.ts +2 -1
- package/esm/typings/src/utils/validators/url/isValidPipelineUrl.d.ts +2 -1
- package/esm/typings/src/utils/validators/url/isValidUrl.d.ts +2 -1
- package/esm/typings/src/utils/validators/uuid/isValidUuid.d.ts +2 -1
- package/package.json +2 -2
- package/umd/index.umd.js +31 -10
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/_packages/types.index.d.ts +6 -3
- package/umd/typings/src/commands/MODEL/ModelCommand.d.ts +2 -2
- package/umd/typings/src/config.d.ts +4 -0
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +13 -0
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.test.d.ts +1 -0
- package/umd/typings/src/conversion/validation/_importPipeline.d.ts +10 -1
- package/umd/typings/src/conversion/validation/validatePipeline.d.ts +1 -1
- package/umd/typings/src/execution/LlmExecutionTools.d.ts +7 -7
- package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
- package/umd/typings/src/execution/PromptResult.d.ts +14 -56
- package/umd/typings/src/execution/PromptResultUsage.d.ts +26 -0
- package/umd/typings/src/execution/UncertainNumber.d.ts +18 -0
- package/umd/typings/src/execution/utils/addUsage.d.ts +1 -1
- package/umd/typings/src/execution/utils/computeUsageCounts.d.ts +1 -1
- package/umd/typings/src/execution/utils/uncertainNumber.d.ts +1 -1
- package/umd/typings/src/execution/utils/usageToWorktime.d.ts +2 -2
- package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +10 -2
- package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +2 -1
- package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +1 -1
- package/umd/typings/src/llm-providers/_common/utils/count-total-cost/limitTotalCost.d.ts +3 -3
- package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
- package/umd/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -4
- package/umd/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -4
- package/umd/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +6 -6
- package/umd/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -7
- package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +6 -6
- package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +1 -1
- package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +9 -7
- package/umd/typings/src/scripting/javascript/utils/unknownToString.d.ts +2 -1
- package/umd/typings/src/types/ModelRequirements.d.ts +53 -14
- package/umd/typings/src/types/ModelVariant.d.ts +14 -0
- package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +2 -4
- package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
- package/umd/typings/src/types/Prompt.d.ts +45 -1
- package/umd/typings/src/types/typeAliases.d.ts +11 -0
- package/umd/typings/src/utils/normalization/parseKeywords.d.ts +2 -1
- package/umd/typings/src/utils/organization/{TODO.d.ts → TODO_any.d.ts} +1 -1
- package/umd/typings/src/utils/organization/TODO_object.d.ts +6 -0
- package/umd/typings/src/utils/organization/TODO_unknown.d.ts +6 -0
- package/umd/typings/src/utils/organization/really_any.d.ts +1 -1
- package/umd/typings/src/utils/organization/really_unknown.d.ts +6 -0
- package/umd/typings/src/utils/validators/email/isValidEmail.d.ts +2 -1
- package/umd/typings/src/utils/validators/filePath/isValidFilePath.d.ts +2 -1
- package/umd/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +2 -1
- package/umd/typings/src/utils/validators/semanticVersion/isValidPromptbookVersion.d.ts +2 -1
- package/umd/typings/src/utils/validators/semanticVersion/isValidSemanticVersion.d.ts +2 -1
- package/umd/typings/src/utils/validators/url/isValidPipelineUrl.d.ts +2 -1
- package/umd/typings/src/utils/validators/url/isValidUrl.d.ts +2 -1
- package/umd/typings/src/utils/validators/uuid/isValidUuid.d.ts +2 -1
package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts
CHANGED

@@ -1,8 +1,16 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+export type CreateLlmToolsFromEnvOptions = {
+    /**
+     * This will will be passed to the created `LlmExecutionTools`
+     *
+     * @default false
+     */
+    isVerbose?: boolean;
+};
 /**
  * @@@
  *
- * Note: This function is not cached, every call creates new instance of LlmExecutionTools
+ * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
@@ -10,7 +18,7 @@ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  *
  * @returns @@@
  */
-export declare function createLlmToolsFromEnv(): LlmExecutionTools;
+export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
 /**
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
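The upshot of this hunk: `createLlmToolsFromEnv` gains an optional options object with a single `isVerbose` flag. A minimal usage sketch, assuming at least one provider key such as `process.env.OPENAI_API_KEY` is set; the import path below is an assumption, since the TODO above notes the public export via `@promptbook/node` is still pending:

```ts
// Assumed import path - the declaration lives in
// src/llm-providers/_common/createLlmToolsFromEnv and is not yet publicly exported
import { createLlmToolsFromEnv } from '@promptbook/openai';

// Before 0.61.0-14 the function accepted no arguments;
// now the optional options object toggles verbose logging (default: false)
const llmTools = createLlmToolsFromEnv({ isVerbose: true });
```
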
package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts
CHANGED
@@ -1,10 +1,11 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
 /**
  * Returns LLM tools for testing purposes
  *
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
  */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(): LlmExecutionTools;
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
 /**
  * Note: [⚪] This should never be in any released package
  */
package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
-import type { PromptResultUsage } from '../../../../execution/
+import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 /**
  * LLM tools with option to get total cost of the execution
  */
package/esm/typings/src/llm-providers/_common/utils/count-total-cost/limitTotalCost.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
-import type { PromptResultUsage } from '../../../../execution/
+import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
-import type {
+import type { TODO_any } from '../../../../utils/organization/TODO_any';
 import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
 /**
  * Options for `limitTotalCost`
@@ -18,7 +18,7 @@ type LimitTotalCostOptions = {
      *
      * @default MemoryStorage
      */
-    storage: PromptbookStorage<
+    storage: PromptbookStorage<TODO_any>;
 };
 /**
  * @@@
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts
CHANGED

@@ -1,6 +1,6 @@
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -26,7 +26,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
     /**
      * Calls Anthropic Claude API to use a chat model.
      */
-    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
     /**
      * Get the model that should be used as default
      */
package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -27,11 +27,11 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
     /**
      * Calls OpenAI API to use a chat model.
      */
-    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
     /**
      * Calls Azure OpenAI API to use a complete model.
      */
-    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
     /**
      * Changes Azure error (which is not propper Error but object) to propper Error
      */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED

@@ -1,8 +1,8 @@
 import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -18,11 +18,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
     /**
      * Mocks chat model
      */
-    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
     /**
      * Mocks completion model
      */
-    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
     /**
      * List all available mocked-models that can be used
      */
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED

@@ -1,9 +1,9 @@
 import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -19,15 +19,15 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
     /**
      * Fakes chat model
      */
-    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<
+    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<ChatPromptResult & CompletionPromptResult>;
     /**
      * Fakes completion model
      */
-    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<
+    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<CompletionPromptResult>;
     /**
      * Fakes embedding model
      */
-    callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<
+    callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
     /**
      * List all available fake-models that can be used
      */
package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts
CHANGED

@@ -1,9 +1,11 @@
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt } from '../../types/Prompt';
+import type { CompletionPrompt } from '../../types/Prompt';
+import type { EmbeddingPrompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
@@ -26,15 +28,15 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     /**
      * Calls the best available chat model
      */
-    callChatModel(prompt:
+    callChatModel(prompt: ChatPrompt): Promise<ChatPromptResult>;
     /**
      * Calls the best available completion model
      */
-    callCompletionModel(prompt:
+    callCompletionModel(prompt: CompletionPrompt): Promise<CompletionPromptResult>;
     /**
      * Calls the best available embedding model
      */
-    callEmbeddingModel(prompt:
+    callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
     /**
      * Calls the best available model
      */
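With `callChatModel` now typed against `ChatPrompt` instead of a loose `Prompt` parameter, the `modelVariant: 'CHAT'` discriminant is enforced at compile time. A call-site sketch under assumed export locations; the non-discriminant `Prompt` fields are elided via `declare`, since only the requirements shape matters here:

```ts
import type { ChatPrompt } from '@promptbook/types'; // assumed export location

// Stand-in for the remaining Prompt fields (title, content, parameters, ...)
declare const basePrompt: Omit<ChatPrompt, 'modelRequirements'>;

const chatPrompt: ChatPrompt = {
    ...basePrompt,
    modelRequirements: {
        modelVariant: 'CHAT', // 'COMPLETION' here would now be a compile-time error
        systemMessage: 'You are a helpful assistant.',
    },
};
// await multipleLlmExecutionTools.callChatModel(chatPrompt);
```
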
package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts
CHANGED

@@ -1,8 +1,8 @@
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { EmbeddingPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -28,15 +28,15 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
     /**
      * Calls OpenAI API to use a chat model.
      */
-    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<
+    callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<ChatPromptResult>;
     /**
      * Calls OpenAI API to use a complete model.
     */
-    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
     /**
      * Calls OpenAI API to use a embedding model
      */
-    callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<
+    callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
     /**
      * Get the model that should be used as default
      */
package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import type OpenAI from 'openai';
-import type { PromptResultUsage } from '../../execution/
+import type { PromptResultUsage } from '../../execution/PromptResultUsage';
 import type { Prompt } from '../../types/Prompt';
 /**
  * Computes the usage of the OpenAI API based on the response from OpenAI
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts
CHANGED

@@ -1,9 +1,11 @@
 import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type {
-import type {
-import type {
-import type {
+import type { ChatPromptResult } from '../../execution/PromptResult';
+import type { CompletionPromptResult } from '../../execution/PromptResult';
+import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt } from '../../types/Prompt';
+import type { CompletionPrompt } from '../../types/Prompt';
+import type { EmbeddingPrompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
@@ -28,15 +30,15 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
     /**
      * Calls remote proxy server to use a chat model
      */
-    callChatModel(prompt:
+    callChatModel(prompt: ChatPrompt): Promise<ChatPromptResult>;
     /**
      * Calls remote proxy server to use a completion model
      */
-    callCompletionModel(prompt:
+    callCompletionModel(prompt: CompletionPrompt): Promise<CompletionPromptResult>;
     /**
      * Calls remote proxy server to use a embedding model
      */
-    callEmbeddingModel(prompt:
+    callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
     /**
      * Calls remote proxy server to use both completion or chat model
      */
package/esm/typings/src/scripting/javascript/utils/unknownToString.d.ts
CHANGED

@@ -1,7 +1,8 @@
+import type { really_unknown } from '../../../utils/organization/really_unknown';
 /**
  * Converts anything to string that can be used for debugging and logging
  *
  * @param value String value for logging
  * @private Internal util
  */
-export declare function unknownToString(value:
+export declare function unknownToString(value: really_unknown): string;
package/esm/typings/src/types/ModelRequirements.d.ts
CHANGED

@@ -1,28 +1,67 @@
+import type { ModelVariant } from './ModelVariant';
 import type { number_model_temperature } from './typeAliases';
 import type { number_seed } from './typeAliases';
 import type { string_model_name } from './typeAliases';
 import type { string_system_message } from './typeAliases';
-export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
 /**
- *
+ * Abstract way to specify the LLM.
+ * It does not specify the LLM with concrete version itself, only the requirements for the LLM.
  *
- *
- *
- * - **CHAT** - model that takes prompt and previous messages and returns response
+ * Note: This is fully serializable as JSON
+ * @see https://github.com/webgptorg/promptbook#model-requirements
  */
-export type
+export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | EmbeddingModelRequirements;
 /**
- *
+ * Model requirements for the completion variant
  *
- *
+ * Note: This is fully serializable as JSON
+ */
+export type CompletionModelRequirements = CommonModelRequirements & {
+    /**
+     * Completion model variant
+     */
+    modelVariant: 'COMPLETION';
+};
+/**
+ * Model requirements for the chat variant
+ *
+ * Note: This is fully serializable as JSON
 */
-export type
+export type ChatModelRequirements = CommonModelRequirements & {
+    /**
+     * Chat model variant
+     */
+    modelVariant: 'CHAT';
+    /**
+     * System message to be used in the model
+     */
+    readonly systemMessage?: string_system_message;
+};
+/**
+ * Model requirements for the embedding variant
+ *
+ * Note: This is fully serializable as JSON
+ */
+export type EmbeddingModelRequirements = CommonModelRequirements & {
+    /**
+     * Embedding model variant
+     */
+    modelVariant: 'EMBEDDING';
+};
+/**
+ * Common properties for all model requirements variants
+ *
+ * Note: This is fully serializable as JSON
+ */
+export type CommonModelRequirements = {
     /**
      * Model variant describes the very general type of the model
      *
-     * There are
+     * There are 3 variants:
      * - **COMPLETION** - model that takes prompt and writes the rest of the text
      * - **CHAT** - model that takes prompt and previous messages and returns response
+     * - **EMBEDDING** - model that takes prompt and returns embedding
+     * <- [🤖]
      */
     readonly modelVariant: ModelVariant;
     /**
@@ -34,10 +73,6 @@ export type ModelRequirements = {
      * @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
      */
     readonly modelName?: string_model_name;
-    /**
-     * System message to be used in the model
-     */
-    readonly systemMessage?: string_system_message;
     /**
      * The temperature of the model
      *
@@ -50,10 +85,13 @@ export type ModelRequirements = {
     readonly seed?: number_seed;
     /**
      * Maximum number of tokens that can be generated by the model
+     *
+     * Note: [🌾]
      */
     readonly maxTokens?: number;
 };
 /**
+ * TODO: [🔼] !!!! Export all from `@promptbook/types`
  * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (simmilar that `user` identification is not here)
  * TODO: [🧠][💱] Add more model options: `stop_token`, `logit_bias`, `logprobs` (`top_logprobs`), `top_k`, `top_p`, `presence_penalty`, `frequency_penalty`, `bestOf`, `logitBias`, `logitBiasType`,...
  * [💱] Probbably keep using just `temperature` in Promptbook (not `top_k` and `top_p`)
@@ -61,4 +99,5 @@ export type ModelRequirements = {
  * TODO: Maybe figure out better word than "variant"
  * TODO: Add here more requirement options like max context size, max tokens, etc.
  * TODO: [💕][🧠] Just selecting gpt3 or gpt4 level of model
+ * TODO: [🧄] Replace all "github.com/webgptorg/promptbook#xxx" with "ptbk.io/xxx"
  */
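Since `ModelRequirements` is now a discriminated union keyed on `modelVariant`, checking the discriminant narrows the type and exposes variant-only fields such as `systemMessage`. A sketch of the pattern; the import path is assumed, since the TODO above marks the `@promptbook/types` export as still pending:

```ts
import type { ModelRequirements } from '@promptbook/types'; // assumed export, see TODO above

function describeRequirements(requirements: ModelRequirements): string {
    if (requirements.modelVariant === 'CHAT') {
        // Narrowed to ChatModelRequirements, so `systemMessage` is visible here
        return `chat model, system message: ${requirements.systemMessage ?? '(none)'}`;
    }
    // Narrowed to CompletionModelRequirements | EmbeddingModelRequirements
    return `${requirements.modelVariant.toLowerCase()} model`;
}
```
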
package/esm/typings/src/types/ModelVariant.d.ts
ADDED

@@ -0,0 +1,14 @@
+/**
+ * @@@
+ *
+ * @private for `ModelVariant` and `modelCommandParser`
+ */
+export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
+/**
+ * Model variant describes the very general type of the model
+ *
+ * There are two variants:
+ * - **COMPLETION** - model that takes prompt and writes the rest of the text
+ * - **CHAT** - model that takes prompt and previous messages and returns response
+ */
+export type ModelVariant = typeof MODEL_VARIANTS[number];
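The `typeof MODEL_VARIANTS[number]` idiom derives the union type from the readonly tuple, so adding a fourth variant to the array would widen `ModelVariant` automatically. A standalone restatement of the idiom:

```ts
// The same idiom as in ModelVariant.d.ts: `as const` preserves the literal
// element types, and indexing the tuple type by `number` unions them
const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING'] as const;
type ModelVariant = typeof MODEL_VARIANTS[number]; // 'COMPLETION' | 'CHAT' | 'EMBEDDING'
```
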
package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type {
+import type { ChatModelRequirements } from '../ModelRequirements';
 import type { number_id } from '../typeAliases';
 import type { string_name } from '../typeAliases';
 import type { string_persona_description } from '../typeAliases';
@@ -30,9 +30,7 @@ export type PersonaPreparedJson = PersonaJson & {
      *
      * Note: The model must be CHAT variant to be usable through persona
      */
-    readonly modelRequirements:
-        readonly modelVariant: 'CHAT';
-    };
+    readonly modelRequirements: ChatModelRequirements;
     /**
      * List of preparation ids that were used to prepare this persona
      */
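The old inline intersection on `modelRequirements` is replaced by the named `ChatModelRequirements`, which accepts the same values. A conforming object, sketched with an illustrative model name and an assumed import path:

```ts
import type { ChatModelRequirements } from '@promptbook/types'; // assumed export

// Same runtime shape the old inline type accepted; only the declaration changed
const modelRequirements: ChatModelRequirements = {
    modelVariant: 'CHAT',
    modelName: 'gpt-4', // optional, inherited from CommonModelRequirements
};
```
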
package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { PromptResultUsage } from '../../execution/
+import type { PromptResultUsage } from '../../execution/PromptResultUsage';
 import type { string_promptbook_version } from '../../version';
 import type { number_id } from '../typeAliases';
 export type PreparationJson = {
package/esm/typings/src/types/Prompt.d.ts
CHANGED

@@ -1,5 +1,8 @@
 import type { ExpectFormatCommand } from '../commands/EXPECT/ExpectFormatCommand';
 import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
+import type { ChatModelRequirements } from './ModelRequirements';
+import type { CompletionModelRequirements } from './ModelRequirements';
+import type { EmbeddingModelRequirements } from './ModelRequirements';
 import type { ModelRequirements } from './ModelRequirements';
 import type { Expectations } from './PipelineJson/Expectations';
 import type { string_parameter_name } from './typeAliases';
@@ -13,7 +16,46 @@ import type { string_title } from './typeAliases';
  * Note: [🛫] This is NOT fully serializable as JSON, it contains functions which are not serializable
  * @see https://github.com/webgptorg/promptbook#prompt
  */
-export type Prompt =
+export type Prompt = CompletionPrompt | ChatPrompt | EmbeddingPrompt;
+/**
+ * Completion prompt
+ *
+ * Note: [🛫] This is NOT fully serializable as JSON, it contains functions which are not serializable
+ */
+export type CompletionPrompt = CommonPrompt & {
+    /**
+     * Requirements for completion model
+     */
+    modelRequirements: CompletionModelRequirements;
+};
+/**
+ * Chat prompt
+ *
+ * Note: [🛫] This is NOT fully serializable as JSON, it contains functions which are not serializable
+ */
+export type ChatPrompt = CommonPrompt & {
+    /**
+     * Requirements for chat model
+     */
+    modelRequirements: ChatModelRequirements;
+};
+/**
+ * Embedding prompt
+ *
+ * Note: [🛫] This is NOT fully serializable as JSON, it contains functions which are not serializable
+ */
+export type EmbeddingPrompt = CommonPrompt & {
+    /**
+     * Requirements for chat model
+     */
+    modelRequirements: EmbeddingModelRequirements;
+};
+/**
+ * Common properties for all prompt results
+ *
+ * Note: This is fully serializable as JSON
+ */
+export type CommonPrompt = {
     /**
      * The title of the prompt
      *
@@ -63,6 +105,8 @@ export type Prompt = {
     readonly parameters: Record<string_parameter_name, string_parameter_value>;
 };
 /**
+ * TODO: [🔼] !!!! Export all from `@promptbook/types`
+ * TODO: [🧄] Replace all "github.com/webgptorg/promptbook#xxx" with "ptbk.io/xxx"
  * TODO: [✔] Check ModelRequirements in runtime
  * TODO: [🏳] Add options for translation - maybe create `TranslationPrompt`
  */
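`Prompt` now mirrors the `ModelRequirements` union, so the nested `modelVariant` discriminant distinguishes the three prompt shapes. A sketch of a user-defined type guard built on it, with import paths assumed per the pending-export TODO above:

```ts
import type { ChatPrompt, Prompt } from '@promptbook/types'; // assumed exports, see TODO above

function isChatPrompt(prompt: Prompt): prompt is ChatPrompt {
    // The nested discriminant is enough to tell the union members apart
    return prompt.modelRequirements.modelVariant === 'CHAT';
}
```
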
package/esm/typings/src/types/typeAliases.d.ts
CHANGED

@@ -196,6 +196,17 @@ export type string_script = string;
  * For example `console.info("Hello World!")`
  */
 export type string_javascript = string;
+/**
+ * Semantic helper for JSON strings
+ *
+ * Note: TType is a type of the JSON object inside the string
+ *
+ * For example `{"foo": "bar"}`
+ */
+export type string_json<TType> = string & {
+    _type: 'string_json';
+    scheme: TType;
+};
 /**
  * Semantic helper
  *
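`string_json<TType>` is a branded string: `_type` and `scheme` exist only at the type level (no runtime object carries them), so ordinary strings must be branded with an explicit cast, and `TType` travels along to document what the string parses to. A sketch with hypothetical helper functions that are not part of the package:

```ts
import type { string_json } from '@promptbook/types'; // assumed export

type PersonaSettings = { name: string; temperature: number };

// Hypothetical helpers illustrating the intended flow:
function stringifyJson<TType>(value: TType): string_json<TType> {
    return JSON.stringify(value) as string_json<TType>; // explicit cast attaches the brand
}
function parseJson<TType>(json: string_json<TType>): TType {
    return JSON.parse(json) as TType; // the brand's `scheme` records what comes back
}

const stored = stringifyJson<PersonaSettings>({ name: 'Alice', temperature: 0.7 });
const settings = parseJson(stored); // inferred as PersonaSettings
console.info(settings.name);
```
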
package/esm/typings/src/utils/normalization/parseKeywords.d.ts
CHANGED

@@ -1,3 +1,4 @@
+import type { really_unknown } from '../organization/really_unknown';
 import type { IKeywords } from './IKeywords';
 /**
  * Parses keywords from any object and recursively walks through
@@ -7,7 +8,7 @@ import type { IKeywords } from './IKeywords';
  * @param input of any kind
  * @returns {Set} of keywords without diacritics in lowercase
  */
-export declare function parseKeywords(input:
+export declare function parseKeywords(input: really_unknown): IKeywords;
 /**
  * Note: Not using spread in input param because of keeping second parameter for options
  * TODO: [🌺] Use some intermediate util splitWords
package/esm/typings/src/utils/validators/email/isValidEmail.d.ts
CHANGED

@@ -1,5 +1,6 @@
 import type { string_email } from '../../../types/typeAliases';
+import type { really_unknown } from '../../organization/really_unknown';
 /**
  * Checks if value is valid email
  */
-export declare function isValidEmail(email:
+export declare function isValidEmail(email: really_unknown): email is string_email;
package/esm/typings/src/utils/validators/filePath/isValidFilePath.d.ts
CHANGED

@@ -1,7 +1,8 @@
 import type { string_file_path } from '../../../types/typeAliases';
+import type { really_unknown } from '../../organization/really_unknown';
 /**
  * Tests if given string is valid URL.
  *
  * Note: This does not check if the file exists only if the path is valid
  */
-export declare function isValidFilePath(filePath:
+export declare function isValidFilePath(filePath: really_unknown): filePath is string_file_path;
package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts
CHANGED

@@ -1,2 +1,3 @@
 import type { string_javascript_name } from '../../../types/typeAliases';
-
+import type { really_unknown } from '../../organization/really_unknown';
+export declare function isValidJavascriptName(javascriptName: really_unknown): javascriptName is string_javascript_name;
package/esm/typings/src/utils/validators/semanticVersion/isValidPromptbookVersion.d.ts
CHANGED

@@ -1,4 +1,5 @@
 import type { string_promptbook_version } from '../../../version';
+import type { really_unknown } from '../../organization/really_unknown';
 /**
  * Tests if given string is valid promptbook version
  * It looks into list of known promptbook versions.
@@ -9,4 +10,4 @@ import type { string_promptbook_version } from '../../../version';
  * - `isValidSemanticVersion` which tests any semantic version
  * - `isValidPromptbookVersion` *(this one)* which tests just Promptbook versions
  */
-export declare function isValidPromptbookVersion(version:
+export declare function isValidPromptbookVersion(version: really_unknown): version is string_promptbook_version;
package/esm/typings/src/utils/validators/semanticVersion/isValidSemanticVersion.d.ts
CHANGED

@@ -1,4 +1,5 @@
 import type { string_semantic_version } from '../../../types/typeAliases';
+import type { really_unknown } from '../../organization/really_unknown';
 /**
  * Tests if given string is valid semantic version
  *
@@ -6,4 +7,4 @@ import type { string_semantic_version } from '../../../types/typeAliases';
  * - `isValidSemanticVersion` which tests any semantic version
  * - `isValidPromptbookVersion` *(this one)* which tests just Promptbook versions
  */
-export declare function isValidSemanticVersion(version:
+export declare function isValidSemanticVersion(version: really_unknown): version is string_semantic_version;
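These validator declarations all follow one pattern: accept `really_unknown` and return a type predicate, so a passing check narrows the value for the caller. A usage sketch for `isValidEmail`; the `@promptbook/utils` import path is an assumption, as is `really_unknown` aliasing `unknown`:

```ts
import { isValidEmail } from '@promptbook/utils'; // assumed export location

function notifyUser(contact: unknown): void {
    // Assumes really_unknown aliases unknown, so this call type-checks
    if (isValidEmail(contact)) {
        // `contact` is narrowed to string_email in this branch
        console.info(`Sending notification to ${contact}`);
    } else {
        console.warn('Not a valid email address:', contact);
    }
}
```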