@promptbook/openai 0.52.0-2 → 0.52.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +44 -3
- package/esm/typings/_packages/core.index.d.ts +2 -1
- package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
- package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
- package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
- package/esm/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
- package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
- package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +1 -1
- package/package.json +2 -2
- package/umd/typings/_packages/core.index.d.ts +2 -1
- package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
- package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
- package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
- package/umd/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
- package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
- package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +1 -1
package/README.md
CHANGED
|
@@ -34,7 +34,45 @@ Wrapper around [OpenAI's SDK](https://www.npmjs.com/package/openai) to make it e
|
|
|
34
34
|
|
|
35
35
|
|
|
36
36
|
|
|
37
|
+
## Usage
|
|
37
38
|
|
|
39
|
+
```typescript
|
|
40
|
+
import { createPromptbookExecutor, createPromptbookLibraryFromDirectory } from '@promptbook/core';
|
|
41
|
+
import { JavascriptEvalExecutionTools } from '@promptbook/execute-javascript';
|
|
42
|
+
import { OpenAiExecutionTools } from '@promptbook/openai';
|
|
43
|
+
import { assertsExecutionSuccessful } from '@promptbook/utils';
|
|
44
|
+
|
|
45
|
+
// TODO: !!!!! Test that this code works
|
|
46
|
+
// TODO: !!!!! Comment
|
|
47
|
+
|
|
48
|
+
const library = createPromptbookLibraryFromDirectory('./promptbook');
|
|
49
|
+
const promptbook = library.getPromptbookByUrl(`https://promptbook.studio/my-library/write-article.ptbk.md`);
|
|
50
|
+
|
|
51
|
+
const tools = {
|
|
52
|
+
llm: new OpenAiExecutionTools({
|
|
53
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
54
|
+
}),
|
|
55
|
+
script: [new JavascriptEvalExecutionTools()],
|
|
56
|
+
};
|
|
57
|
+
|
|
58
|
+
const promptbookExecutor = createPromptbookExecutor({ promptbook, tools });
|
|
59
|
+
|
|
60
|
+
const inputParameters = { word: 'cat' };
|
|
61
|
+
const { isSuccessful, errors, outputParameters, executionReport } = await promptbookExecutor(inputParameters);
|
|
62
|
+
|
|
63
|
+
console.info(outputParameters);
|
|
64
|
+
|
|
65
|
+
assertsExecutionSuccessful({ isSuccessful, errors });
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
## Other models
|
|
71
|
+
|
|
72
|
+
See the other models available in the Promptbook package:
|
|
73
|
+
|
|
74
|
+
- [Azure OpenAI](https://www.npmjs.com/package/@promptbook/azure-openai)
|
|
75
|
+
- [Anthropic Claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)
|
|
38
76
|
|
|
39
77
|
|
|
40
78
|
---
|
|
@@ -493,9 +531,10 @@ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
|
|
|
493
531
|
- _(Not implemented yet)_ `BardExecutionTools`
|
|
494
532
|
- _(Not implemented yet)_ `LamaExecutionTools`
|
|
495
533
|
- _(Not implemented yet)_ `GpuExecutionTools`
|
|
496
|
-
- And a special case are `
|
|
497
|
-
-
|
|
498
|
-
- The
|
|
534
|
+
- And a special case are `MultipleLlmExecutionTools` that combines multiple execution tools together and tries to execute the prompt on the best one.
|
|
535
|
+
- Another special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
|
|
536
|
+
- The another special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
|
|
537
|
+
- The another special case is `LogLlmExecutionToolsWrapper` that is technically also an execution tools but it is more proxy wrapper around other execution tools that logs all calls to execution tools.
|
|
499
538
|
|
|
500
539
|
#### Script Execution Tools
|
|
501
540
|
|
|
@@ -594,6 +633,8 @@ Execution report is a simple object or markdown that contains information about
|
|
|
594
633
|
|
|
595
634
|
|
|
596
635
|
|
|
636
|
+
|
|
637
|
+
|
|
597
638
|
### Remote server
|
|
598
639
|
|
|
599
640
|
Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
|
|
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
|
|
|
11
11
|
import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
|
|
12
12
|
import { ExecutionTypes } from '../types/ExecutionTypes';
|
|
13
13
|
import { PROMPTBOOK_VERSION } from '../version';
|
|
14
|
+
import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
|
|
14
15
|
export { ExecutionTypes, PROMPTBOOK_VERSION };
|
|
15
16
|
export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
|
|
16
17
|
export { SimplePromptInterfaceTools };
|
|
17
18
|
export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
|
|
18
|
-
export { createPromptbookExecutor };
|
|
19
|
+
export { createPromptbookExecutor, MultipleLlmExecutionTools };
|
|
19
20
|
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
|
|
@@ -35,6 +35,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
|
|
|
35
35
|
listModels(): Array<AvailableModel>;
|
|
36
36
|
}
|
|
37
37
|
/**
|
|
38
|
+
* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
|
|
38
39
|
* TODO: [🍓][♐] Allow to list compatible models with each variant
|
|
39
40
|
* TODO: Maybe Create some common util for gptChat and gptComplete
|
|
40
41
|
* TODO: Maybe make custom OpenaiError
|
|
@@ -25,6 +25,10 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
|
|
|
25
25
|
* Calls Azure OpenAI API to use a complete model.
|
|
26
26
|
*/
|
|
27
27
|
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
|
|
28
|
+
/**
|
|
29
|
+
* Changes Azure error (which is not a proper Error but a plain object) to a proper Error
|
|
30
|
+
*/
|
|
31
|
+
private transformAzureError;
|
|
28
32
|
/**
|
|
29
33
|
* List all available Azure OpenAI models that can be used
|
|
30
34
|
*/
|
package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import type { Prompt } from '../../../../types/Prompt';
|
|
2
|
+
import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
|
|
3
|
+
import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
|
|
4
|
+
/**
|
|
5
|
+
* Multiple LLM Execution Tools combine multiple execution tools together and try to execute the prompt on the best available one.
|
|
6
|
+
*
|
|
7
|
+
* @see https://github.com/webgptorg/promptbook#multiple-server
|
|
8
|
+
*/
|
|
9
|
+
export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
|
|
10
|
+
/**
|
|
11
|
+
* Array of execution tools in order of priority
|
|
12
|
+
*/
|
|
13
|
+
private llmExecutionTools;
|
|
14
|
+
/**
|
|
15
|
+
* Gets array of execution tools in order of priority
|
|
16
|
+
*/
|
|
17
|
+
constructor(...llmExecutionTools: Array<LlmExecutionTools>);
|
|
18
|
+
/**
|
|
19
|
+
* Calls the best available chat model
|
|
20
|
+
*/
|
|
21
|
+
gptChat(prompt: Prompt): Promise<PromptChatResult>;
|
|
22
|
+
/**
|
|
23
|
+
* Calls the best available completion model
|
|
24
|
+
*/
|
|
25
|
+
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
|
|
26
|
+
/**
|
|
27
|
+
* Calls the best available model
|
|
28
|
+
*/
|
|
29
|
+
private gptCommon;
|
|
30
|
+
/**
|
|
31
|
+
* List all available models that can be used
|
|
32
|
+
* This list is a combination of all available models from all execution tools
|
|
33
|
+
*/
|
|
34
|
+
listModels(): Promise<Array<AvailableModel>>;
|
|
35
|
+
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { client_id, string_uri } from '../../../../types/typeAliases';
|
|
2
|
+
import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
|
|
3
|
+
/**
|
|
4
|
+
* Options for MultipleLlmExecutionTools
|
|
5
|
+
*/
|
|
6
|
+
export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
|
|
7
|
+
/**
|
|
8
|
+
* URL of the multiple PROMPTBOOK server
|
|
9
|
+
* On this server will be connected to the socket.io server
|
|
10
|
+
*/
|
|
11
|
+
readonly multipleUrl: URL;
|
|
12
|
+
/**
|
|
13
|
+
* Path for the Socket.io server to listen
|
|
14
|
+
*
|
|
15
|
+
* @default '/socket.io'
|
|
16
|
+
* @example '/promptbook/socket.io'
|
|
17
|
+
*/
|
|
18
|
+
readonly path: string_uri;
|
|
19
|
+
/**
|
|
20
|
+
* Your client ID
|
|
21
|
+
*/
|
|
22
|
+
readonly clientId: client_id;
|
|
23
|
+
};
|
|
@@ -8,7 +8,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
|
|
|
8
8
|
*/
|
|
9
9
|
export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
|
|
10
10
|
private readonly options;
|
|
11
|
-
constructor(options
|
|
11
|
+
constructor(options?: JavascriptExecutionToolsOptions);
|
|
12
12
|
/**
|
|
13
13
|
* Executes a JavaScript
|
|
14
14
|
*/
|
|
@@ -7,7 +7,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
|
|
|
7
7
|
*/
|
|
8
8
|
export declare class JavascriptExecutionTools implements ScriptExecutionTools {
|
|
9
9
|
private readonly options;
|
|
10
|
-
constructor(options
|
|
10
|
+
constructor(options?: JavascriptExecutionToolsOptions);
|
|
11
11
|
/**
|
|
12
12
|
* Executes a JavaScript
|
|
13
13
|
*/
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@promptbook/openai",
|
|
3
|
-
"version": "0.52.0-
|
|
3
|
+
"version": "0.52.0-4",
|
|
4
4
|
"description": "Library to supercharge your use of large language models",
|
|
5
5
|
"private": false,
|
|
6
6
|
"sideEffects": false,
|
|
@@ -48,7 +48,7 @@
|
|
|
48
48
|
}
|
|
49
49
|
],
|
|
50
50
|
"peerDependencies": {
|
|
51
|
-
"@promptbook/core": "0.52.0-
|
|
51
|
+
"@promptbook/core": "0.52.0-4"
|
|
52
52
|
},
|
|
53
53
|
"main": "./umd/index.umd.js",
|
|
54
54
|
"module": "./esm/index.es.js",
|
|
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
|
|
|
11
11
|
import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
|
|
12
12
|
import { ExecutionTypes } from '../types/ExecutionTypes';
|
|
13
13
|
import { PROMPTBOOK_VERSION } from '../version';
|
|
14
|
+
import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
|
|
14
15
|
export { ExecutionTypes, PROMPTBOOK_VERSION };
|
|
15
16
|
export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
|
|
16
17
|
export { SimplePromptInterfaceTools };
|
|
17
18
|
export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
|
|
18
|
-
export { createPromptbookExecutor };
|
|
19
|
+
export { createPromptbookExecutor, MultipleLlmExecutionTools };
|
|
19
20
|
export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
|
|
@@ -35,6 +35,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
|
|
|
35
35
|
listModels(): Array<AvailableModel>;
|
|
36
36
|
}
|
|
37
37
|
/**
|
|
38
|
+
* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
|
|
38
39
|
* TODO: [🍓][♐] Allow to list compatible models with each variant
|
|
39
40
|
* TODO: Maybe Create some common util for gptChat and gptComplete
|
|
40
41
|
* TODO: Maybe make custom OpenaiError
|
|
@@ -25,6 +25,10 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
|
|
|
25
25
|
* Calls Azure OpenAI API to use a complete model.
|
|
26
26
|
*/
|
|
27
27
|
gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
|
|
28
|
+
/**
|
|
29
|
+
* Changes Azure error (which is not a proper Error but a plain object) to a proper Error
|
|
30
|
+
*/
|
|
31
|
+
private transformAzureError;
|
|
28
32
|
/**
|
|
29
33
|
* List all available Azure OpenAI models that can be used
|
|
30
34
|
*/
|
package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import type { Prompt } from '../../../../types/Prompt';
|
|
2
|
+
import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
|
|
3
|
+
import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
|
|
4
|
+
/**
|
|
5
|
+
* Multiple LLM Execution Tools combine multiple execution tools together and try to execute the prompt on the best available one.
|
|
6
|
+
*
|
|
7
|
+
* @see https://github.com/webgptorg/promptbook#multiple-server
|
|
8
|
+
*/
|
|
9
|
+
export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
|
|
10
|
+
/**
|
|
11
|
+
* Array of execution tools in order of priority
|
|
12
|
+
*/
|
|
13
|
+
private llmExecutionTools;
|
|
14
|
+
/**
|
|
15
|
+
* Gets array of execution tools in order of priority
|
|
16
|
+
*/
|
|
17
|
+
constructor(...llmExecutionTools: Array<LlmExecutionTools>);
|
|
18
|
+
/**
|
|
19
|
+
* Calls the best available chat model
|
|
20
|
+
*/
|
|
21
|
+
gptChat(prompt: Prompt): Promise<PromptChatResult>;
|
|
22
|
+
/**
|
|
23
|
+
* Calls the best available completion model
|
|
24
|
+
*/
|
|
25
|
+
gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
|
|
26
|
+
/**
|
|
27
|
+
* Calls the best available model
|
|
28
|
+
*/
|
|
29
|
+
private gptCommon;
|
|
30
|
+
/**
|
|
31
|
+
* List all available models that can be used
|
|
32
|
+
* This list is a combination of all available models from all execution tools
|
|
33
|
+
*/
|
|
34
|
+
listModels(): Promise<Array<AvailableModel>>;
|
|
35
|
+
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { client_id, string_uri } from '../../../../types/typeAliases';
|
|
2
|
+
import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
|
|
3
|
+
/**
|
|
4
|
+
* Options for MultipleLlmExecutionTools
|
|
5
|
+
*/
|
|
6
|
+
export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
|
|
7
|
+
/**
|
|
8
|
+
* URL of the multiple PROMPTBOOK server
|
|
9
|
+
* On this server will be connected to the socket.io server
|
|
10
|
+
*/
|
|
11
|
+
readonly multipleUrl: URL;
|
|
12
|
+
/**
|
|
13
|
+
* Path for the Socket.io server to listen
|
|
14
|
+
*
|
|
15
|
+
* @default '/socket.io'
|
|
16
|
+
* @example '/promptbook/socket.io'
|
|
17
|
+
*/
|
|
18
|
+
readonly path: string_uri;
|
|
19
|
+
/**
|
|
20
|
+
* Your client ID
|
|
21
|
+
*/
|
|
22
|
+
readonly clientId: client_id;
|
|
23
|
+
};
|
|
@@ -8,7 +8,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
|
|
|
8
8
|
*/
|
|
9
9
|
export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
|
|
10
10
|
private readonly options;
|
|
11
|
-
constructor(options
|
|
11
|
+
constructor(options?: JavascriptExecutionToolsOptions);
|
|
12
12
|
/**
|
|
13
13
|
* Executes a JavaScript
|
|
14
14
|
*/
|
|
@@ -7,7 +7,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
|
|
|
7
7
|
*/
|
|
8
8
|
export declare class JavascriptExecutionTools implements ScriptExecutionTools {
|
|
9
9
|
private readonly options;
|
|
10
|
-
constructor(options
|
|
10
|
+
constructor(options?: JavascriptExecutionToolsOptions);
|
|
11
11
|
/**
|
|
12
12
|
* Executes a JavaScript
|
|
13
13
|
*/
|