@promptbook/cli 0.52.0-2 → 0.52.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. package/README.md +6 -3
  2. package/esm/index.es.js +1 -1
  3. package/esm/typings/_packages/core.index.d.ts +2 -1
  4. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  5. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  6. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  7. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  8. package/esm/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  9. package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  10. package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +1 -1
  11. package/package.json +2 -2
  12. package/umd/index.umd.js +1 -1
  13. package/umd/typings/_packages/core.index.d.ts +2 -1
  14. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  15. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  16. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  17. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  18. package/umd/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  19. package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  20. package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +1 -1
package/README.md CHANGED
@@ -498,9 +498,10 @@ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
498
498
  - _(Not implemented yet)_ `BardExecutionTools`
499
499
  - _(Not implemented yet)_ `LamaExecutionTools`
500
500
  - _(Not implemented yet)_ `GpuExecutionTools`
501
- - And a special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
502
- - The second special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
503
- - The third special case is `LogLlmExecutionToolsWrapper` that is technically also an execution tools but it is more proxy wrapper around other execution tools that logs all calls to execution tools.
501
+ - And a special case are `MultipleLlmExecutionTools` that combines multiple execution tools together and tries to execute the prompt on the best one.
502
+ - Another special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
503
+ - Another special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
504
+ - Yet another special case is `LogLlmExecutionToolsWrapper` which is technically also an execution tool but is more of a proxy wrapper around other execution tools that logs all calls to them.
504
505
 
505
506
  #### Script Execution Tools
506
507
 
@@ -599,6 +600,8 @@ Execution report is a simple object or markdown that contains information about
599
600
 
600
601
 
601
602
 
603
+
604
+
602
605
  ### Remote server
603
606
 
604
607
  Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
package/esm/index.es.js CHANGED
@@ -143,7 +143,7 @@ new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined'
143
143
  /**
144
144
  * The version of the Promptbook library
145
145
  */
146
- var PROMPTBOOK_VERSION = '0.52.0-1';
146
+ var PROMPTBOOK_VERSION = '0.52.0-3';
147
147
 
148
148
  /**
149
149
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
11
11
  import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
12
12
  import { ExecutionTypes } from '../types/ExecutionTypes';
13
13
  import { PROMPTBOOK_VERSION } from '../version';
14
+ import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
14
15
  export { ExecutionTypes, PROMPTBOOK_VERSION };
15
16
  export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
16
17
  export { SimplePromptInterfaceTools };
17
18
  export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
18
- export { createPromptbookExecutor };
19
+ export { createPromptbookExecutor, MultipleLlmExecutionTools };
19
20
  export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
@@ -35,6 +35,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
35
35
  listModels(): Array<AvailableModel>;
36
36
  }
37
37
  /**
38
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
38
39
  * TODO: [🍓][♐] Allow to list compatible models with each variant
39
40
  * TODO: Maybe Create some common util for gptChat and gptComplete
40
41
  * TODO: Maybe make custom OpenaiError
@@ -25,6 +25,10 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
25
25
  * Calls Azure OpenAI API to use a complete model.
26
26
  */
27
27
  gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Changes Azure error (which is not a proper Error but a plain object) to a proper Error
30
+ */
31
+ private transformAzureError;
28
32
  /**
29
33
  * List all available Azure OpenAI models that can be used
30
34
  */
@@ -0,0 +1,35 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ /**
5
+ * Multiple LLM Execution Tools is a proxy that uses multiple execution tools internally and exposes the executor interface externally.
6
+ *
7
+ * @see https://github.com/webgptorg/promptbook#multiple-server
8
+ */
9
+ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
10
+ /**
11
+ * Array of execution tools in order of priority
12
+ */
13
+ private llmExecutionTools;
14
+ /**
15
+ * Gets array of execution tools in order of priority
16
+ */
17
+ constructor(...llmExecutionTools: Array<LlmExecutionTools>);
18
+ /**
19
+ * Calls the best available chat model
20
+ */
21
+ gptChat(prompt: Prompt): Promise<PromptChatResult>;
22
+ /**
23
+ * Calls the best available completion model
24
+ */
25
+ gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
26
+ /**
27
+ * Calls the best available model
28
+ */
29
+ private gptCommon;
30
+ /**
31
+ * List all available models that can be used
32
+ * This list is a combination of all available models from all execution tools
33
+ */
34
+ listModels(): Promise<Array<AvailableModel>>;
35
+ }
@@ -0,0 +1,23 @@
1
+ import type { client_id, string_uri } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for MultipleLlmExecutionTools
5
+ */
6
+ export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
7
+ /**
8
+ * URL of the multiple PROMPTBOOK server
9
+ * On this server will be connected to the socket.io server
10
+ */
11
+ readonly multipleUrl: URL;
12
+ /**
13
+ * Path for the Socket.io server to listen
14
+ *
15
+ * @default '/socket.io'
16
+ * @example '/promptbook/socket.io'
17
+ */
18
+ readonly path: string_uri;
19
+ /**
20
+ * Your client ID
21
+ */
22
+ readonly clientId: client_id;
23
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -8,7 +8,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
8
8
  */
9
9
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
10
10
  private readonly options;
11
- constructor(options: JavascriptExecutionToolsOptions);
11
+ constructor(options?: JavascriptExecutionToolsOptions);
12
12
  /**
13
13
  * Executes a JavaScript
14
14
  */
@@ -7,7 +7,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
7
7
  */
8
8
  export declare class JavascriptExecutionTools implements ScriptExecutionTools {
9
9
  private readonly options;
10
- constructor(options: JavascriptExecutionToolsOptions);
10
+ constructor(options?: JavascriptExecutionToolsOptions);
11
11
  /**
12
12
  * Executes a JavaScript
13
13
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.52.0-2",
3
+ "version": "0.52.0-4",
4
4
  "description": "Library to supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -52,7 +52,7 @@
52
52
  }
53
53
  ],
54
54
  "peerDependencies": {
55
- "@promptbook/core": "0.52.0-2"
55
+ "@promptbook/core": "0.52.0-4"
56
56
  },
57
57
  "main": "./umd/index.umd.js",
58
58
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -146,7 +146,7 @@
146
146
  /**
147
147
  * The version of the Promptbook library
148
148
  */
149
- var PROMPTBOOK_VERSION = '0.52.0-1';
149
+ var PROMPTBOOK_VERSION = '0.52.0-3';
150
150
 
151
151
  /**
152
152
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
11
11
  import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
12
12
  import { ExecutionTypes } from '../types/ExecutionTypes';
13
13
  import { PROMPTBOOK_VERSION } from '../version';
14
+ import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
14
15
  export { ExecutionTypes, PROMPTBOOK_VERSION };
15
16
  export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
16
17
  export { SimplePromptInterfaceTools };
17
18
  export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
18
- export { createPromptbookExecutor };
19
+ export { createPromptbookExecutor, MultipleLlmExecutionTools };
19
20
  export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
@@ -35,6 +35,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
35
35
  listModels(): Array<AvailableModel>;
36
36
  }
37
37
  /**
38
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
38
39
  * TODO: [🍓][♐] Allow to list compatible models with each variant
39
40
  * TODO: Maybe Create some common util for gptChat and gptComplete
40
41
  * TODO: Maybe make custom OpenaiError
@@ -25,6 +25,10 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
25
25
  * Calls Azure OpenAI API to use a complete model.
26
26
  */
27
27
  gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Changes Azure error (which is not a proper Error but a plain object) to a proper Error
30
+ */
31
+ private transformAzureError;
28
32
  /**
29
33
  * List all available Azure OpenAI models that can be used
30
34
  */
@@ -0,0 +1,35 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ /**
5
+ * Multiple LLM Execution Tools is a proxy that uses multiple execution tools internally and exposes the executor interface externally.
6
+ *
7
+ * @see https://github.com/webgptorg/promptbook#multiple-server
8
+ */
9
+ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
10
+ /**
11
+ * Array of execution tools in order of priority
12
+ */
13
+ private llmExecutionTools;
14
+ /**
15
+ * Gets array of execution tools in order of priority
16
+ */
17
+ constructor(...llmExecutionTools: Array<LlmExecutionTools>);
18
+ /**
19
+ * Calls the best available chat model
20
+ */
21
+ gptChat(prompt: Prompt): Promise<PromptChatResult>;
22
+ /**
23
+ * Calls the best available completion model
24
+ */
25
+ gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
26
+ /**
27
+ * Calls the best available model
28
+ */
29
+ private gptCommon;
30
+ /**
31
+ * List all available models that can be used
32
+ * This list is a combination of all available models from all execution tools
33
+ */
34
+ listModels(): Promise<Array<AvailableModel>>;
35
+ }
@@ -0,0 +1,23 @@
1
+ import type { client_id, string_uri } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for MultipleLlmExecutionTools
5
+ */
6
+ export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
7
+ /**
8
+ * URL of the multiple PROMPTBOOK server
9
+ * On this server will be connected to the socket.io server
10
+ */
11
+ readonly multipleUrl: URL;
12
+ /**
13
+ * Path for the Socket.io server to listen
14
+ *
15
+ * @default '/socket.io'
16
+ * @example '/promptbook/socket.io'
17
+ */
18
+ readonly path: string_uri;
19
+ /**
20
+ * Your client ID
21
+ */
22
+ readonly clientId: client_id;
23
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -8,7 +8,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
8
8
  */
9
9
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
10
10
  private readonly options;
11
- constructor(options: JavascriptExecutionToolsOptions);
11
+ constructor(options?: JavascriptExecutionToolsOptions);
12
12
  /**
13
13
  * Executes a JavaScript
14
14
  */
@@ -7,7 +7,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
7
7
  */
8
8
  export declare class JavascriptExecutionTools implements ScriptExecutionTools {
9
9
  private readonly options;
10
- constructor(options: JavascriptExecutionToolsOptions);
10
+ constructor(options?: JavascriptExecutionToolsOptions);
11
11
  /**
12
12
  * Executes a JavaScript
13
13
  */