@promptbook/browser 0.66.0-6 → 0.66.0-8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1 -1
- package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
- package/esm/typings/src/_packages/cli.index.d.ts +4 -2
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/cli/main.d.ts +2 -2
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +11 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +1 -1
package/esm/index.es.js
CHANGED
@@ -5,7 +5,7 @@ import { isRunningInBrowser } from 'openai/core';
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-
+var PROMPTBOOK_VERSION = '0.66.0-7';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
package/esm/typings/src/_packages/azure-openai.index.d.ts
CHANGED
@@ -1,6 +1,10 @@
 import { PROMPTBOOK_VERSION } from '../version';
 import { AzureOpenAiExecutionTools } from '../llm-providers/azure-openai/AzureOpenAiExecutionTools';
 import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions';
+import { createAzureOpenAiExecutionTools } from '../llm-providers/azure-openai/createAzureOpenAiExecutionTools';
+import { _AzureOpenAiRegistration } from '../llm-providers/azure-openai/register-constructor';
 export { PROMPTBOOK_VERSION };
 export { AzureOpenAiExecutionTools };
 export type { AzureOpenAiExecutionToolsOptions };
+export { createAzureOpenAiExecutionTools };
+export { _AzureOpenAiRegistration };
package/esm/typings/src/_packages/cli.index.d.ts
CHANGED
@@ -1,8 +1,10 @@
 import { PROMPTBOOK_VERSION } from '../version';
-import {
+import { _CLI } from '../cli/main';
 import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
+import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
 import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
 export { PROMPTBOOK_VERSION };
-export {
+export { _CLI };
 export { _AnthropicClaudeMetadataRegistration };
+export { _AzureOpenAiMetadataRegistration };
 export { _OpenAiMetadataRegistration };
package/esm/typings/src/_packages/core.index.d.ts
CHANGED
@@ -54,6 +54,7 @@ import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTool
 import { countTotalUsage } from '../llm-providers/_common/utils/count-total-usage/countTotalUsage';
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
 import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
+import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
 import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
 import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
 import { preparePersona } from '../personas/preparePersona';
@@ -124,6 +125,7 @@ export { cacheLlmTools };
 export { countTotalUsage };
 export { limitTotalUsage };
 export { _AnthropicClaudeMetadataRegistration };
+export { _AzureOpenAiMetadataRegistration };
 export { joinLlmExecutionTools };
 export { _OpenAiMetadataRegistration };
 export { preparePersona };
package/esm/typings/src/cli/main.d.ts
CHANGED
@@ -4,8 +4,8 @@ import { promptbookCli } from './promptbookCli';
  *
  * @public exported from `@promptbook/cli`
  */
-export declare const
-
+export declare const _CLI: {
+    _initialize: typeof promptbookCli;
 };
 /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts
CHANGED
@@ -1 +1 @@
-
+export {};
@@ -1 +1 @@
-
+export {};
package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts
CHANGED
@@ -1,3 +1,4 @@
+import '../../_packages/core.index';
 import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromConfigurationOptions & {
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts
CHANGED
@@ -17,7 +17,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
     /**
      * Anthropic Claude API client.
      */
-    private
+    private client;
     /**
      * Creates Anthropic Claude Execution Tools.
      *
@@ -26,6 +26,15 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
     constructor(options?: AnthropicClaudeExecutionToolsDirectOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    private getClient;
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    checkConfiguration(): Promise<void>;
+    /**
+     * List all available Anthropic Claude models that can be used
+     */
+    listModels(): Array<AvailableModel>;
     /**
      * Calls Anthropic Claude API to use a chat model.
      */
@@ -38,10 +47,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
      * Default model for chat variant.
      */
     private getDefaultChatModel;
-    /**
-     * List all available Anthropic Claude models that can be used
-     */
-    listModels(): Array<AvailableModel>;
 }
 /**
  * TODO: [🍆] JSON mode
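The `checkConfiguration()` and `listModels()` members are now declared up front in the class, right after the getters, instead of at the bottom. A minimal sketch of the new lifecycle from a consumer's point of view; the `apiKey` option and the `@promptbook/anthropic-claude` entry point are assumptions, only the method signatures above are confirmed by this diff:

```ts
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Hypothetical option shape; `AnthropicClaudeExecutionToolsDirectOptions` is not expanded in this diff.
const tools = new AnthropicClaudeExecutionTools({ apiKey: 'sk-ant-…' });

// Validate the constructor options before making any calls (top-level await assumes an ESM context).
await tools.checkConfiguration();

// For this provider `listModels()` is declared synchronously, so no `await` is needed.
for (const model of tools.listModels()) {
    console.log(model);
}
```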
package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts
CHANGED
@@ -17,7 +17,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
     /**
      * OpenAI Azure API client.
      */
-    private
+    private client;
     /**
      * Creates OpenAI Execution Tools.
      *
@@ -26,6 +26,15 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
     constructor(options: AzureOpenAiExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    private getClient;
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    checkConfiguration(): Promise<void>;
+    /**
+     * List all available Azure OpenAI models that can be used
+     */
+    listModels(): Promise<Array<AvailableModel>>;
     /**
      * Calls OpenAI API to use a chat model.
      */
@@ -38,10 +47,6 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
      * Changes Azure error (which is not propper Error but object) to propper Error
      */
     private transformAzureError;
-    /**
-     * List all available Azure OpenAI models that can be used
-     */
-    listModels(): Promise<Array<AvailableModel>>;
 }
 /**
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts
ADDED
@@ -0,0 +1,15 @@
+import { AzureOpenAiExecutionTools } from './AzureOpenAiExecutionTools';
+import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
+/**
+ * Execution Tools for calling Azure OpenAI API
+ *
+ * @public exported from `@promptbook/azure-openai`
+ */
+export declare const createAzureOpenAiExecutionTools: ((options: AzureOpenAiExecutionToolsOptions) => AzureOpenAiExecutionTools) & {
+    packageName: string;
+    className: string;
+};
+/**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
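The factory mirrors `new AzureOpenAiExecutionTools(options)` and additionally carries the `packageName` / `className` metadata consumed by the constructor registry. A hedged usage sketch; the option names below are typical Azure OpenAI settings and are not confirmed by this diff:

```ts
import { createAzureOpenAiExecutionTools } from '@promptbook/azure-openai';

// Assumed option shape; only the factory signature above is guaranteed.
const tools = createAzureOpenAiExecutionTools({
    resourceName: 'my-azure-resource',
    deploymentName: 'my-gpt-deployment',
    apiKey: '…',
});

// Metadata attached to the factory itself:
console.log(createAzureOpenAiExecutionTools.packageName, createAzureOpenAiExecutionTools.className);

// The returned instance behaves like one built with `new`:
await tools.checkConfiguration();
console.log(await tools.listModels());
```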
package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts
ADDED
@@ -0,0 +1,9 @@
+/**
+ * @@@ registration1 of default configuration for Azure Open AI
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+export declare const _AzureOpenAiMetadataRegistration: void;
package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts
ADDED
@@ -0,0 +1,11 @@
+/**
+ * @@@ registration2
+ *
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/azure-openai`
+ */
+export declare const _AzureOpenAiRegistration: void;
+/**
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
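Both new constants are typed `void`; they exist only for their import side effects, splitting registration in two as the [🏐] note says: the lightweight configuration metadata is exported from `@promptbook/core` / `@promptbook/cli`, while the constructor registration ships with `@promptbook/azure-openai`. This is the same pattern as the new `import '../../_packages/core.index';` line added to getLlmToolsForTestingAndScriptsAndPlayground.d.ts above. A minimal sketch of the wiring implied by the `@public exported from` annotations:

```ts
// Importing the core index runs the metadata registration (_AzureOpenAiMetadataRegistration):
import '@promptbook/core';

// Importing the provider package runs the constructor registration (_AzureOpenAiRegistration),
// which points at createAzureOpenAiExecutionTools:
import '@promptbook/azure-openai';

// Nothing to call; after these imports the Azure OpenAI provider is known to the
// registries alongside the existing Anthropic Claude and OpenAI registrations.
```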
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts
CHANGED
@@ -17,6 +17,14 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
     constructor(options?: CommonExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    /**
+     * Does nothing, just to implement the interface
+     */
+    checkConfiguration(): void;
+    /**
+     * List all available mocked-models that can be used
+     */
+    listModels(): Array<AvailableModel>;
     /**
      * Mocks chat model
      */
@@ -25,10 +33,6 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
      * Mocks completion model
      */
     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
-    /**
-     * List all available mocked-models that can be used
-     */
-    listModels(): Array<AvailableModel>;
 }
 /**
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
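Because the mocked tools now expose the same `checkConfiguration()` / `listModels()` surface, they remain drop-in stand-ins for the real providers in tests, just with synchronous no-op semantics. A short sketch, assuming the mocked tools are published in `@promptbook/fake-llm`:

```ts
import { MockedEchoLlmExecutionTools } from '@promptbook/fake-llm';

// Options are optional for the mocked tools.
const mocked = new MockedEchoLlmExecutionTools();

// No credentials to verify, so this is a synchronous no-op
// (declared `void`, not `Promise<void>` as on the real providers).
mocked.checkConfiguration();

// Synchronous as well; returns the mocked model catalogue.
console.log(mocked.listModels());
```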
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts
CHANGED
@@ -18,6 +18,14 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
     constructor(options?: CommonExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    /**
+     * Does nothing, just to implement the interface
+     */
+    checkConfiguration(): void;
+    /**
+     * List all available fake-models that can be used
+     */
+    listModels(): Array<AvailableModel>;
     /**
      * Fakes chat model
      */
@@ -30,10 +38,6 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
      * Fakes embedding model
      */
     callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
-    /**
-     * List all available fake-models that can be used
-     */
-    listModels(): Array<AvailableModel>;
 }
 /**
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts
CHANGED
@@ -28,6 +28,15 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
     constructor(...llmExecutionTools: Array<LlmExecutionTools>);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    /**
+     * Check the configuration of all execution tools
+     */
+    checkConfiguration(): Promise<void>;
+    /**
+     * List all available models that can be used
+     * This lists is a combination of all available models from all execution tools
+     */
+    listModels(): Promise<Array<AvailableModel>>;
     /**
      * Calls the best available chat model
      */
@@ -46,11 +55,6 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
      * Note: This should be private or protected but is public to be usable with duck typing
      */
     callCommonModel(prompt: Prompt): Promise<PromptResult>;
-    /**
-     * List all available models that can be used
-     * This lists is a combination of all available models from all execution tools
-     */
-    listModels(): Promise<Array<AvailableModel>>;
 }
 /**
  * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
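`MultipleLlmExecutionTools` fans both methods out to the tools it wraps: `checkConfiguration()` checks every inner tool and `listModels()` returns the combined catalogue. A sketch using `joinLlmExecutionTools`, which the core.index diff above re-exports from `@promptbook/core`; it is assumed here to take the tools as rest arguments, matching the variadic constructor declared above, and the provider options are placeholders:

```ts
import { joinLlmExecutionTools } from '@promptbook/core';
import { OpenAiExecutionTools } from '@promptbook/openai';
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Placeholder credentials; the real option shapes are not part of this diff.
const joined = joinLlmExecutionTools(
    new OpenAiExecutionTools({ apiKey: 'sk-…' }),
    new AnthropicClaudeExecutionTools({ apiKey: 'sk-ant-…' }),
);

// Checks the configuration of all wrapped execution tools.
await joined.checkConfiguration();

// Combined list of models across all wrapped tools.
console.log(await joined.listModels());
```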
package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts
CHANGED
@@ -18,7 +18,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
     /**
      * OpenAI API client.
      */
-    private
+    private client;
     /**
      * Creates OpenAI Execution Tools.
      *
@@ -27,6 +27,15 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
     constructor(options?: OpenAiExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    private getClient;
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    checkConfiguration(): Promise<void>;
+    /**
+     * List all available OpenAI models that can be used
+     */
+    listModels(): Array<AvailableModel>;
     /**
      * Calls OpenAI API to use a chat model.
      */
@@ -55,10 +64,6 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
      * Default model for completion variant.
      */
     private getDefaultEmbeddingModel;
-    /**
-     * List all available OpenAI models that can be used
-     */
-    listModels(): Array<AvailableModel>;
 }
 /**
  * TODO: [🧠][🧙♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts
CHANGED
@@ -24,6 +24,14 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
     constructor(options: RemoteLlmExecutionToolsOptions);
     get title(): string_title & string_markdown_text;
     get description(): string_markdown;
+    /**
+     * Check the configuration of all execution tools
+     */
+    checkConfiguration(): Promise<void>;
+    /**
+     * List all available models that can be used
+     */
+    listModels(): Promise<Array<AvailableModel>>;
     /**
      * Creates a connection to the remote proxy server.
      */
@@ -44,10 +52,6 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
      * Calls remote proxy server to use both completion or chat model
      */
     private callCommonModel;
-    /**
-     * List all available models that can be used
-     */
-    listModels(): Promise<Array<AvailableModel>>;
 }
 /**
  * TODO: [🍓] Allow to list compatible models with each variant
package/esm/typings/src/personas/preparePersona.test.d.ts
CHANGED
@@ -1 +1 @@
-
+export {};
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/browser",
-    "version": "0.66.0-
+    "version": "0.66.0-8",
    "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/browser.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.66.0-
+        "@promptbook/core": "0.66.0-8"
     },
     "dependencies": {
         "spacetrim": "0.11.39"
package/umd/index.umd.js
CHANGED
@@ -8,7 +8,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-
+var PROMPTBOOK_VERSION = '0.66.0-7';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************