@promptbook/remote-server 0.66.0-1 → 0.66.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +47 -21
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
- package/esm/typings/src/_packages/core.index.d.ts +8 -2
- package/esm/typings/src/_packages/openai.index.d.ts +4 -0
- package/esm/typings/src/_packages/types.index.d.ts +6 -2
- package/esm/typings/src/config.d.ts +0 -7
- package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -19
- package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
- package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts +12 -0
- package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
- package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +2 -10
- package/esm/typings/src/llm-providers/_common/config.d.ts +4 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +2 -2
- package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.test.d.ts → computeOpenAiUsage.test.d.ts} +1 -1
- package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/utils/Register.d.ts +22 -0
- package/package.json +2 -2
- package/umd/index.umd.js +47 -21
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/config.d.ts
CHANGED

@@ -1,4 +1,3 @@
-import type { LlmToolsConfiguration } from './llm-providers/_common/LlmToolsConfiguration';
 /**
  * Warning message for the generated sections and files files
  *
@@ -120,12 +119,6 @@ export declare const DEFAULT_REMOTE_URL = "https://api.pavolhejny.com/";
  * @public exported from `@promptbook/core`
  */
 export declare const DEFAULT_REMOTE_URL_PATH = "/promptbook/socket.io";
-/**
- * @@@
- *
- * @public exported from `@promptbook/core`
- */
-export declare const BOILERPLATE_LLM_TOOLS_CONFIGURATION_: LlmToolsConfiguration;
 /**
  * @@@
  *
package/esm/typings/src/execution/AvailableModel.d.ts
ADDED

@@ -0,0 +1,20 @@
+import type { ModelVariant } from '../types/ModelVariant';
+import type { string_model_name } from '../types/typeAliases';
+import type { string_title } from '../types/typeAliases';
+/**
+ * Represents a model that can be used for prompt execution
+ */
+export type AvailableModel = {
+    /**
+     * The model title
+     */
+    readonly modelTitle: string_title;
+    /**
+     * The model name aviailable
+     */
+    readonly modelName: string_model_name;
+    /**
+     * Variant of the model
+     */
+    readonly modelVariant: ModelVariant;
+};
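For orientation, a value conforming to the extracted `AvailableModel` type could look like the following sketch (the import path and the exact `ModelVariant` union are assumptions, not taken from this diff):

    import type { AvailableModel } from '@promptbook/types'; // assumes the re-export added to types.index.d.ts

    // Hypothetical entry; the real entries live in openai-models.d.ts and anthropic-claude-models.d.ts
    const exampleModel: AvailableModel = {
        modelTitle: 'GPT-4 Turbo', // human-readable title (string_title)
        modelName: 'gpt-4-turbo', // identifier passed to the provider API (string_model_name)
        modelVariant: 'CHAT', // assuming ModelVariant is a union such as 'COMPLETION' | 'CHAT' | 'EMBEDDING'
    };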
package/esm/typings/src/execution/LlmExecutionTools.d.ts
CHANGED

@@ -1,10 +1,9 @@
 import type { Promisable } from 'type-fest';
-import type { ModelVariant } from '../types/ModelVariant';
 import type { Prompt } from '../types/Prompt';
 import type { string_markdown } from '../types/typeAliases';
 import type { string_markdown_text } from '../types/typeAliases';
-import type { string_model_name } from '../types/typeAliases';
 import type { string_title } from '../types/typeAliases';
+import type { AvailableModel } from './AvailableModel';
 import type { ChatPromptResult } from './PromptResult';
 import type { CompletionPromptResult } from './PromptResult';
 import type { EmbeddingPromptResult } from './PromptResult';
@@ -51,23 +50,6 @@ export type LlmExecutionTools = {
      */
     callEmbeddingModel?(prompt: Prompt): Promise<EmbeddingPromptResult>;
 };
-/**
- * Represents a model that can be used for prompt execution
- */
-export type AvailableModel = {
-    /**
-     * The model title
-     */
-    readonly modelTitle: string_title;
-    /**
-     * The model name aviailable
-     */
-    readonly modelName: string_model_name;
-    /**
-     * Variant of the model
-     */
-    readonly modelVariant: ModelVariant;
-};
 /**
  * TODO: Implement destroyable pattern to free resources
  * TODO: [🏳] Add `callTranslationModel`
package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts
ADDED

@@ -0,0 +1,10 @@
+import type { TODO_any } from '../utils/organization/TODO_any';
+import type { Registered } from '../utils/Register';
+import type { LlmExecutionTools } from './LlmExecutionTools';
+/**
+ * @@@
+ */
+export type LlmExecutionToolsConstructor = Registered & ((options: TODO_any) => LlmExecutionTools);
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
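Because `LlmExecutionToolsConstructor` intersects `Registered` with a factory function, a conforming value must carry `packageName` and `className` as own properties of the function itself. A minimal sketch of how that can be satisfied, mirroring the `Object.assign` pattern visible in the UMD bundle below (the provider name and the cast are placeholders):

    import type { LlmExecutionTools } from '@promptbook/types'; // assumed re-export
    import type { LlmExecutionToolsConstructor } from '@promptbook/types'; // assumed re-export

    // Hypothetical provider factory; the cast stands in for a real LlmExecutionTools implementation
    const createMyProviderExecutionTools: LlmExecutionToolsConstructor = Object.assign(
        (options: unknown) => ({ title: 'My provider', listModels: () => [] }) as unknown as LlmExecutionTools,
        {
            packageName: '@promptbook/my-provider',
            className: 'MyProviderExecutionTools',
        },
    );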
package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts
ADDED

@@ -0,0 +1,12 @@
+import { Register } from '../../utils/Register';
+/**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
+ */
+export declare const $llmToolsConfigurationBoilerplatesRegister: Register<import("../../utils/Register").Registered & {
+    title: string;
+    options: object;
+}>;
package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts
ADDED

@@ -0,0 +1,10 @@
+import type { LlmExecutionToolsConstructor } from '../../execution/LlmExecutionToolsConstructor';
+import { Register } from '../../utils/Register';
+/**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
+ */
+export declare const $llmToolsRegister: Register<LlmExecutionToolsConstructor>;
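Taken together with `LlmExecutionToolsConstructor`, the per-provider `register-constructor.d.ts` files further down presumably boil down to a side-effecting call like this sketch (not shown in the diff itself):

    import { $llmToolsRegister } from '@promptbook/core';
    import { createOpenAiExecutionTools } from '@promptbook/openai';

    // The factory carries `packageName` and `className` as own properties,
    // so it satisfies the Registered constraint of Register.register()
    $llmToolsRegister.register(createOpenAiExecutionTools);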
package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts
CHANGED

@@ -1,22 +1,14 @@
 import type { string_title } from '../../types/typeAliases';
 import type { TODO_object } from '../../utils/organization/TODO_object';
-import type {
+import type { Registered } from '../../utils/Register';
 /**
  * @@@
  */
-export type LlmToolsConfiguration = Array<{
+export type LlmToolsConfiguration = Array<Registered & {
     /**
      * @@@
      */
     title: string_title;
-    /**
-     * @@@
-     */
-    packageName: TODO_string;
-    /**
-     * @@@
-     */
-    className: TODO_string;
     /**
      * @@@
      */
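After this change a configuration entry inherits `packageName` and `className` from `Registered` instead of declaring them ad hoc. A sketch of a valid value (the `options` keys are illustrative; the typings still use `TODO_object`):

    import type { LlmToolsConfiguration } from '@promptbook/types'; // assumed re-export

    const configuration: LlmToolsConfiguration = [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: { apiKey: 'sk-...' }, // hypothetical shape
        },
    ];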
package/esm/typings/src/llm-providers/_common/config.d.ts
CHANGED

@@ -1,6 +1,10 @@
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { TODO_any } from '../../utils/organization/TODO_any';
 /**
+ * @@@
+ *
+ * TODO: !!!!!! Not centralized - register each provider to each package
+ *
  * @private internal type for `createLlmToolsFromConfiguration`
  */
 export declare const EXECUTION_TOOLS_CLASSES: Record<`create${string}`, (options: TODO_any) => LlmExecutionTools>;
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
@@ -47,7 +47,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  * TODO: [🍆] JSON mode
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts
CHANGED

@@ -14,5 +14,5 @@ import type { Prompt } from '../../types/Prompt';
 export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
 resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
 /**
- * TODO: [🤝] DRY Maybe some common abstraction between `
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts
CHANGED

@@ -6,9 +6,14 @@ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExec
  *
  * @public exported from `@promptbook/anthropic-claude`
  */
-export declare
+export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicClaudeExecutionToolsOptions) => AnthropicClaudeExecutionTools | RemoteLlmExecutionTools) & {
+    packageName: string;
+    className: string;
+};
 /**
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
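Call sites stay compatible with the previous signature; what is new is the metadata on the function object itself. A usage sketch (the `apiKey` option name is assumed from `AnthropicClaudeExecutionToolsOptions`; the metadata values are confirmed by the UMD diff below):

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // Returns AnthropicClaudeExecutionTools, or RemoteLlmExecutionTools when `isProxied` is set
    const tools = createAnthropicClaudeExecutionTools({ apiKey: 'sk-ant-...' });

    createAnthropicClaudeExecutionTools.packageName; // '@promptbook/anthropic-claude'
    createAnthropicClaudeExecutionTools.className; // 'AnthropicClaudeExecutionTools'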
package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts
ADDED

@@ -0,0 +1,8 @@
+/**
+ * @@@ registration1 of default configuration for Anthropic Claude
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/core`
+ */
+export declare const _AnthropicClaudeConfigurationRegistration: void;
package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts
ADDED

@@ -0,0 +1,11 @@
+/**
+ * @@@ registration2
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/anthropic-claude`
+ */
+export declare const _AnthropicClaudeRegistration: void;
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -45,7 +45,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
 }
 /**
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom AzureOpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED

@@ -1,5 +1,5 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED

@@ -1,5 +1,5 @@
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -9,7 +9,7 @@ import type { string_markdown_text } from '../../types/typeAliases';
 import type { string_title } from '../../types/typeAliases';
 import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
 /**
- * Execution Tools for calling OpenAI API 
+ * Execution Tools for calling OpenAI API
  *
  * @public exported from `@promptbook/openai`
  */
@@ -63,7 +63,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
 /**
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts}
RENAMED

@@ -11,8 +11,8 @@ import type { Prompt } from '../../types/Prompt';
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
  * @private internal utility of `OpenAiExecutionTools`
  */
-export declare function
+export declare function computeOpenAiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
 resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
 /**
- * TODO: [🤝] DRY Maybe some common abstraction between `
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts
ADDED

@@ -0,0 +1,15 @@
+import { OpenAiExecutionTools } from './OpenAiExecutionTools';
+import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
+/**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+export declare const createOpenAiExecutionTools: ((options: OpenAiExecutionToolsOptions) => OpenAiExecutionTools) & {
+    packageName: string;
+    className: string;
+};
+/**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
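The OpenAI factory follows the same pattern as the Anthropic one above; a usage sketch (the option name is assumed from `OpenAiExecutionToolsOptions`):

    import { createOpenAiExecutionTools } from '@promptbook/openai';

    // Equivalent to `new OpenAiExecutionTools(options)`, but discoverable through the register
    const tools = createOpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY! });

    const models = await tools.listModels(); // Array<AvailableModel>, per the LlmExecutionTools typings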
package/esm/typings/src/llm-providers/openai/register-constructor.d.ts
ADDED

@@ -0,0 +1,11 @@
+/**
+ * @@@ registration2
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/openai`
+ */
+export declare const _OpenAiRegistration: void;
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { AvailableModel } from '../../execution/LlmExecutionTools';
+import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
 import type { CompletionPromptResult } from '../../execution/PromptResult';
package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts
CHANGED

@@ -1,5 +1,5 @@
+import type { AvailableModel } from '../../../execution/AvailableModel';
 import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
-import type { AvailableModel } from '../../../execution/LlmExecutionTools';
 import type { client_id } from '../../../types/typeAliases';
 import type { string_base_url } from '../../../types/typeAliases';
 import type { string_uri } from '../../../types/typeAliases';
@@ -53,4 +53,4 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
 });
 /**
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
- */
+ */
package/esm/typings/src/utils/Register.d.ts
ADDED

@@ -0,0 +1,22 @@
+import type { TODO_string } from './organization/TODO_string';
+export type Registered = {
+    /**
+     * @@@
+     */
+    packageName: TODO_string;
+    /**
+     * @@@
+     */
+    className: TODO_string;
+};
+/**
+ * Register is @@@
+ *
+ * @private internal utility, exported are only signleton instances of this class
+ */
+export declare class Register<TRegistered extends Registered> {
+    private readonly storage;
+    constructor(storage: Array<TRegistered>);
+    list(): Array<TRegistered>;
+    register(registered: TRegistered): void;
+}
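The declaration file only pins down the shape; an implementation consistent with it could be as small as the following sketch (the real class may deduplicate entries or interact with the global scope, as the `$`-prefixed singletons and the `@singleton` note suggest):

    export type Registered = {
        packageName: string; // still TODO_string in the typings
        className: string;
    };

    export class Register<TRegistered extends Registered> {
        constructor(private readonly storage: Array<TRegistered>) {}

        list(): Array<TRegistered> {
            return this.storage;
        }

        register(registered: TRegistered): void {
            this.storage.push(registered); // a real implementation may guard against duplicates
        }
    }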
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/remote-server",
-    "version": "0.66.0-1",
+    "version": "0.66.0-4",
     "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.66.0-1"
+        "@promptbook/core": "0.66.0-4"
     },
     "dependencies": {
         "@anthropic-ai/sdk": "0.26.1",
package/umd/index.umd.js
CHANGED

@@ -16,7 +16,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-
+var PROMPTBOOK_VERSION = '0.66.0-3';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -1247,7 +1247,7 @@
 };
 }
 /**
- * TODO: [🤝] DRY Maybe some common abstraction between `
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
 /**
@@ -1470,7 +1470,7 @@
  * TODO: [🍆] JSON mode
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
@@ -1481,7 +1481,7 @@
  *
  * @public exported from `@promptbook/anthropic-claude`
  */
-
+var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
 if (options.isProxied) {
 return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
 {
@@ -1492,14 +1492,17 @@
 },
 ], models: ANTHROPIC_CLAUDE_MODELS }));
 }
-return new AnthropicClaudeExecutionTools(
-
-
-
+return new AnthropicClaudeExecutionTools(options);
+}, {
+    packageName: '@promptbook/anthropic-claude',
+    className: 'AnthropicClaudeExecutionTools',
+});
 /**
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */
 
 /**
@@ -2101,7 +2104,7 @@
 }());
 /**
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom AzureOpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
@@ -2115,7 +2118,7 @@
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
  * @private internal utility of `OpenAiExecutionTools`
  */
-function
+function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
 resultContent, rawResponse) {
 var _a, _b;
 if (rawResponse.usage === undefined) {
@@ -2141,11 +2144,11 @@
 };
 }
 /**
- * TODO: [🤝] DRY Maybe some common abstraction between `
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
 /**
- * Execution Tools for calling OpenAI API 
+ * Execution Tools for calling OpenAI API
  *
  * @public exported from `@promptbook/openai`
  */
@@ -2244,7 +2247,7 @@
 resultContent = rawResponse.choices[0].message.content;
 // eslint-disable-next-line prefer-const
 complete = getCurrentIsoDate();
-usage =
+usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
 if (resultContent === null) {
 throw new PipelineExecutionError('No response message from OpenAI');
 }
@@ -2313,7 +2316,7 @@
 resultContent = rawResponse.choices[0].text;
 // eslint-disable-next-line prefer-const
 complete = getCurrentIsoDate();
-usage =
+usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
 return [2 /*return*/, {
 content: resultContent,
 modelName: rawResponse.model || modelName,
@@ -2370,7 +2373,7 @@
 resultContent = rawResponse.data[0].embedding;
 // eslint-disable-next-line prefer-const
 complete = getCurrentIsoDate();
-usage =
+usage = computeOpenAiUsage(content, '', rawResponse);
 return [2 /*return*/, {
 content: resultContent,
 modelName: rawResponse.model || modelName,
@@ -2444,18 +2447,37 @@
 /**
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
 
 /**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+var createOpenAiExecutionTools = Object.assign(function (options) {
+    // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
+    return new OpenAiExecutionTools(options);
+}, {
+    packageName: '@promptbook/openai',
+    className: 'OpenAiExecutionTools',
+});
+/**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
+
+/**
+ * @@@
+ *
+ * TODO: !!!!!! Not centralized - register each provider to each package
+ *
  * @private internal type for `createLlmToolsFromConfiguration`
  */
 var EXECUTION_TOOLS_CLASSES = {
-createOpenAiExecutionTools:
-return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
-},
+createOpenAiExecutionTools: createOpenAiExecutionTools,
 createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
 createAzureOpenAiExecutionTools: function (options) {
 return new AzureOpenAiExecutionTools(
@@ -2481,7 +2503,11 @@
 if (options === void 0) { options = {}; }
 var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
 var llmTools = configuration.map(function (llmConfiguration) {
-
+var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
+if (!constructor) {
+throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
+}
+return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
 });
 return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
 }