@promptbook/remote-client 0.52.0-1 → 0.52.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +10 -6
  2. package/esm/index.es.js +1 -1
  3. package/esm/typings/_packages/anthropic-claude.index.d.ts +3 -0
  4. package/esm/typings/_packages/azure-openai.index.d.ts +3 -0
  5. package/esm/typings/_packages/core.index.d.ts +2 -1
  6. package/esm/typings/_packages/openai.index.d.ts +1 -1
  7. package/esm/typings/execution/PromptResult.d.ts +3 -3
  8. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +42 -0
  9. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  10. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  11. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  12. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +41 -0
  13. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  14. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  15. package/esm/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  16. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  17. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  18. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  19. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  20. package/esm/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  21. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  22. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  23. package/esm/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  24. package/esm/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  25. package/esm/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
  26. package/package.json +2 -2
  27. package/umd/index.umd.js +1 -1
  28. package/umd/typings/_packages/anthropic-claude.index.d.ts +3 -0
  29. package/umd/typings/_packages/azure-openai.index.d.ts +3 -0
  30. package/umd/typings/_packages/core.index.d.ts +2 -1
  31. package/umd/typings/_packages/openai.index.d.ts +1 -1
  32. package/umd/typings/execution/PromptResult.d.ts +3 -3
  33. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +42 -0
  34. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  35. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  36. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  37. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +41 -0
  38. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  39. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  40. package/umd/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  41. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  42. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  43. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  44. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  45. package/umd/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  46. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  47. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  48. package/umd/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  49. package/umd/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  50. package/umd/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
package/README.md CHANGED
@@ -329,7 +329,7 @@ Or you can install them separately:
329
329
  - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
330
330
  - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks
331
331
  - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
332
- - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
332
+ - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
333
333
  - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
334
334
  - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
335
335
  - **[@promptbook/mock](https://www.npmjs.com/package/@promptbook/mock)** - Mocked execution tools for testing the library and saving the tokens
@@ -482,14 +482,16 @@ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
482
482
  `LlmExecutionTools` is an abstract interface that is implemented by concrete execution tools:
483
483
 
484
484
  - `OpenAiExecutionTools`
485
- - _(Not implemented yet !!!!! )_ `AnthropicClaudeExecutionTools`
486
- - _(Not implemented yet !!!!! )_ `AzureOpenAiExecutionTools`
485
+ - `AnthropicClaudeExecutionTools`
486
+ - `AzureOpenAiExecutionTools`
487
+ - `LangtailExecutionTools`
487
488
  - _(Not implemented yet)_ `BardExecutionTools`
488
489
  - _(Not implemented yet)_ `LamaExecutionTools`
489
490
  - _(Not implemented yet)_ `GpuExecutionTools`
490
- - And a special case are `RemoteLlmExecutionTools` that connect to a remote server and run one of the above execution tools on that server.
491
- - The second special case is `MockedEchoLlmExecutionTools` that is used for testing and mocking.
492
- - The third special case is `LogLlmExecutionToolsWrapper` that is technically also an execution tools but it is more proxy wrapper around other execution tools that logs all calls to execution tools.
491
+ - A special case is `MultipleLlmExecutionTools`, which combines multiple execution tools and tries to execute the prompt on the best one.
492
+ - Another special case is `RemoteLlmExecutionTools`, which connects to a remote server and runs one of the above execution tools on that server.
493
+ - Another special case is `MockedEchoLlmExecutionTools`, which is used for testing and mocking.
494
+ - Another special case is `LogLlmExecutionToolsWrapper`, which is technically also an execution tool but acts more as a proxy wrapper around other execution tools, logging all calls to them.
493
495
 
494
496
  #### Script Execution Tools
495
497
 
@@ -588,6 +590,8 @@ Execution report is a simple object or markdown that contains information about
588
590
 
589
591
 
590
592
 
593
+
594
+
591
595
  ### Remote server
592
596
 
593
597
  Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
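The execution tools listed in the README above all implement the same `LlmExecutionTools` interface, whose `gptChat`/`gptComplete` signatures appear in the typings later in this diff. A minimal structural sketch of code written against that shared surface; the `modelVariant` field and the `content` property on the result are assumptions based on the typings, not confirmed API:

```typescript
// Minimal structural view of the shared interface; the real `LlmExecutionTools`
// (declared in the .d.ts files below) also exposes gptComplete and listModels.
interface ChatCapableTools {
    gptChat(prompt: {
        content: string;
        modelRequirements: { modelVariant: 'CHAT' }; // assumed minimal ModelRequirements shape
    }): Promise<{ content: string }>; // assumed: PromptChatResult carries the generated content
}

// Intended to work with any of the implementations listed above (OpenAI, Anthropic Claude,
// Azure OpenAI, the mocked tools, ...) since they share this call surface.
async function askAnyModel(tools: ChatCapableTools, question: string): Promise<string> {
    const result = await tools.gptChat({
        content: question,
        modelRequirements: { modelVariant: 'CHAT' },
    });
    return result.content;
}
```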
package/esm/index.es.js CHANGED
@@ -141,7 +141,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
141
141
  return __awaiter(this, void 0, void 0, function () {
142
142
  return __generator(this, function (_a) {
143
143
  return [2 /*return*/, [
144
- /* !!!!! */
144
+ /* !!! */
145
145
  ]];
146
146
  });
147
147
  });
@@ -0,0 +1,3 @@
1
+ import { AnthropicClaudeExecutionTools } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools';
2
+ import { AnthropicClaudeExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
3
+ export { AnthropicClaudeExecutionTools, AnthropicClaudeExecutionToolsOptions };
@@ -0,0 +1,3 @@
1
+ import { AzureOpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools';
2
+ import { AzureOpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions';
3
+ export { AzureOpenAiExecutionTools, AzureOpenAiExecutionToolsOptions };
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
11
11
  import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
12
12
  import { ExecutionTypes } from '../types/ExecutionTypes';
13
13
  import { PROMPTBOOK_VERSION } from '../version';
14
+ import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
14
15
  export { ExecutionTypes, PROMPTBOOK_VERSION };
15
16
  export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
16
17
  export { SimplePromptInterfaceTools };
17
18
  export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
18
- export { createPromptbookExecutor };
19
+ export { createPromptbookExecutor, MultipleLlmExecutionTools };
19
20
  export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
@@ -1,4 +1,4 @@
1
- import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
1
+ import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
2
2
  import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
3
3
  import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
4
4
  export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };
@@ -23,7 +23,7 @@ export type PromptCommonResult = {
23
23
  /**
24
24
  * Name of the model used to generate the response
25
25
  */
26
- readonly model: string_model_name;
26
+ readonly modelName: string_model_name;
27
27
  /**
28
28
  * Timing
29
29
  */
@@ -54,11 +54,11 @@ export type PromptCommonResult = {
54
54
  /**
55
55
  * Number of tokens used in the input aka. `prompt_tokens`
56
56
  */
57
- inputTokens: number_tokens;
57
+ inputTokens: number_tokens | 'UNKNOWN';
58
58
  /**
59
59
  * Number of tokens used in the output aka. `completion_tokens`
60
60
  */
61
- outputTokens: number_tokens;
61
+ outputTokens: number_tokens | 'UNKNOWN';
62
62
  };
63
63
  /**
64
64
  * Raw response from the model
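The `PromptResult` change renames `model` to `modelName` and allows `inputTokens`/`outputTokens` to be the literal `'UNKNOWN'` when a provider does not report usage. A small sketch of defensive accounting over such results; the `usage` property name wrapping the token counts is an assumption, since the enclosing object is outside this hunk:

```typescript
type TokenCount = number | 'UNKNOWN';

// Assumed shape, mirroring the PromptCommonResult fields visible in this diff.
interface ResultLike {
    modelName: string;
    usage: { inputTokens: TokenCount; outputTokens: TokenCount }; // 'usage' name is assumed
}

// Sum only the token counts that were actually reported.
function totalKnownTokens(results: ReadonlyArray<ResultLike>): number {
    return results.reduce((sum, { usage }) => {
        const input = usage.inputTokens === 'UNKNOWN' ? 0 : usage.inputTokens;
        const output = usage.outputTokens === 'UNKNOWN' ? 0 : usage.outputTokens;
        return sum + input + output;
    }, 0);
}
```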
@@ -0,0 +1,42 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Anthropic Claude API.
7
+ */
8
+ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * Anthropic Claude API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Anthropic Claude Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the Anthropic Claude client
18
+ */
19
+ constructor(options: AnthropicClaudeExecutionToolsOptions);
20
+ /**
21
+ * Calls Anthropic Claude API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Anthropic Claude API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Default model for chat variant.
30
+ */
31
+ private getDefaultChatModel;
32
+ /**
33
+ * List all available Anthropic Claude models that can be used
34
+ */
35
+ listModels(): Array<AvailableModel>;
36
+ }
37
+ /**
38
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
39
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
40
+ * TODO: Maybe Create some common util for gptChat and gptComplete
41
+ * TODO: Maybe make custom OpenaiError
42
+ */
@@ -0,0 +1,8 @@
1
+ import type { ClientOptions } from '@anthropic-ai/sdk';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AnthropicClaudeExecutionTools
5
+ *
6
+ * This extends Anthropic's `ClientOptions`, which are directly passed to the Anthropic client.
7
+ */
8
+ export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
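Taken together, the two declarations above suggest a usage pattern like the following sketch; the `ANTHROPIC_API_KEY` environment variable and the minimal `modelRequirements` shape are assumptions, while `apiKey` itself comes from Anthropic's `ClientOptions`:

```typescript
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

async function main() {
    // Options are CommonExecutionToolsOptions & ClientOptions, so the API key
    // is passed straight through to the Anthropic SDK client.
    const claudeTools = new AnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_API_KEY,
    });

    const result = await claudeTools.gptChat({
        content: 'Summarize the Promptbook project in one sentence.',
        modelRequirements: { modelVariant: 'CHAT' }, // assumed minimal ModelRequirements
    });

    console.info(result.modelName); // e.g. one of the models from ANTHROPIC_CLAUDE_MODELS
}

main().catch(console.error);
```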
@@ -0,0 +1,20 @@
1
+ import { number_usd } from '../../../../types/typeAliases';
2
+ import type { AvailableModel } from '../../../LlmExecutionTools';
3
+ /**
4
+ * List of available Anthropic Claude models with pricing
5
+ *
6
+ * Note: Done at 2024-05-25
7
+ *
8
+ * @see https://docs.anthropic.com/en/docs/models-overview
9
+ */
10
+ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
11
+ pricing?: {
12
+ prompt: number_usd;
13
+ output: number_usd;
14
+ };
15
+ }>;
16
+ /**
17
+ * TODO: [🧠] Some mechanism to propagate unsureness
18
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
19
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ */
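`ANTHROPIC_CLAUDE_MODELS` mirrors `OPENAI_MODELS`: a static list of `AvailableModel` entries with optional per-token pricing. A sketch of reading it; the `modelName` field on `AvailableModel` and the re-export from `@promptbook/anthropic-claude` are assumptions, since neither is shown in this diff:

```typescript
import { ANTHROPIC_CLAUDE_MODELS } from '@promptbook/anthropic-claude'; // assumed re-export

for (const model of ANTHROPIC_CLAUDE_MODELS) {
    if (model.pricing) {
        // prompt/output are USD per single token (see the computeUsage helper further below)
        console.info(model.modelName, model.pricing.prompt, model.pricing.output);
    }
}
```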
@@ -0,0 +1,41 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Azure OpenAI API.
7
+ */
8
+ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * OpenAI Azure API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Azure OpenAI Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the OpenAI client
18
+ */
19
+ constructor(options: AzureOpenAiExecutionToolsOptions);
20
+ /**
21
+ * Calls Azure OpenAI API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Azure OpenAI API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Changes an Azure error (which is not a proper Error but a plain object) into a proper Error
30
+ */
31
+ private transformAzureError;
32
+ /**
33
+ * List all available Azure OpenAI models that can be used
34
+ */
35
+ listModels(): Promise<Array<AvailableModel>>;
36
+ }
37
+ /**
38
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
39
+ * TODO: Maybe Create some common util for gptChat and gptComplete
40
+ * TODO: Maybe make custom AzureOpenaiError
41
+ */
@@ -0,0 +1,34 @@
1
+ import { string_name, string_token } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AzureOpenAiExecutionTools
5
+ *
6
+ * @see https://oai.azure.com/portal/
7
+ */
8
+ export type AzureOpenAiExecutionToolsOptions = CommonExecutionToolsOptions & {
9
+ /**
10
+ * The resource name of the Azure OpenAI resource
11
+ *
12
+ * Note: Typically you have one resource and multiple deployments.
13
+ */
14
+ resourceName: string_name;
15
+ /**
16
+ * The deployment name
17
+ *
18
+ * Note: If you specify modelName in prompt, it will be used instead of deploymentName
19
+ * Note: This is kind of a modelName in OpenAI terms
20
+ * Note: Typically you have one resource and multiple deployments.
21
+ */
22
+ deploymentName: string_name;
23
+ /**
24
+ * The API key of the Azure OpenAI resource
25
+ */
26
+ apiKey: string_token;
27
+ /**
28
+ * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
29
+ * and detect abuse.
30
+ *
31
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
32
+ */
33
+ user?: string_token;
34
+ };
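Unlike the OpenAI and Anthropic options, the Azure options address the model by resource and deployment rather than by a raw model name. A sketch of constructing the tools; the resource and deployment names are hypothetical placeholders and the environment variable name is an assumption:

```typescript
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

async function listAzureModels() {
    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-openai-resource', // hypothetical Azure OpenAI resource
        deploymentName: 'gpt-35-turbo',     // hypothetical deployment; acts like a modelName
        apiKey: process.env.AZURE_OPENAI_API_KEY!, // assumed env variable name
    });

    return azureTools.listModels(); // Promise<Array<AvailableModel>>
}
```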
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
11
11
  /**
12
12
  * Mocks chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
15
15
  /**
16
16
  * Mocks completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available mocked-models that can be used
21
21
  */
@@ -11,11 +11,11 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
11
11
  /**
12
12
  * Fakes chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
15
15
  /**
16
16
  * Fakes completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available fake-models that can be used
21
21
  */
@@ -0,0 +1,35 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ /**
5
+ * Multiple LLM Execution Tools is a proxy that uses multiple execution tools internally and exposes the single executor interface externally.
6
+ *
7
+ * @see https://github.com/webgptorg/promptbook#multiple-server
8
+ */
9
+ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
10
+ /**
11
+ * Array of execution tools in order of priority
12
+ */
13
+ private llmExecutionTools;
14
+ /**
15
+ * Gets array of execution tools in order of priority
16
+ */
17
+ constructor(...llmExecutionTools: Array<LlmExecutionTools>);
18
+ /**
19
+ * Calls the best available chat model
20
+ */
21
+ gptChat(prompt: Prompt): Promise<PromptChatResult>;
22
+ /**
23
+ * Calls the best available completion model
24
+ */
25
+ gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
26
+ /**
27
+ * Calls the best available model
28
+ */
29
+ private gptCommon;
30
+ /**
31
+ * List all available models that can be used
32
+ * This list is a combination of all available models from all execution tools
33
+ */
34
+ listModels(): Promise<Array<AvailableModel>>;
35
+ }
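The constructor takes the wrapped tools as rest arguments in priority order, and the `gptChat`/`gptComplete` docblocks say the best available model is used, so a combination such as the following sketch appears to be the intended usage. The exact fallback behaviour is not visible in the typings, and the option shapes for the wrapped tools follow the earlier examples:

```typescript
import { MultipleLlmExecutionTools } from '@promptbook/core'; // re-exported in core.index.d.ts above
import { OpenAiExecutionTools } from '@promptbook/openai';
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Tools are listed in priority order; prompts go to the best tool that can handle them.
const combinedTools = new MultipleLlmExecutionTools(
    new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY }),
    new AnthropicClaudeExecutionTools({ apiKey: process.env.ANTHROPIC_API_KEY }),
);

// listModels() merges the model lists of all wrapped execution tools.
combinedTools.listModels().then((models) => console.info(models));
```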
@@ -0,0 +1,23 @@
1
+ import type { client_id, string_uri } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for MultipleLlmExecutionTools
5
+ */
6
+ export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
7
+ /**
8
+ * URL of the multiple PROMPTBOOK server
9
+ * The socket.io client will connect to the server at this URL
10
+ */
11
+ readonly multipleUrl: URL;
12
+ /**
13
+ * Path for the Socket.io server to listen
14
+ *
15
+ * @default '/socket.io'
16
+ * @example '/promptbook/socket.io'
17
+ */
18
+ readonly path: string_uri;
19
+ /**
20
+ * Your client ID
21
+ */
22
+ readonly clientId: client_id;
23
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -10,7 +10,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
10
10
  /**
11
11
  * OpenAI API client.
12
12
  */
13
- private readonly openai;
13
+ private readonly client;
14
14
  /**
15
15
  * Creates OpenAI Execution Tools.
16
16
  *
@@ -20,11 +20,11 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  /**
21
21
  * Calls OpenAI API to use a chat model.
22
22
  */
23
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
24
  /**
25
25
  * Calls OpenAI API to use a complete model.
26
26
  */
27
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
28
  /**
29
29
  * Default model for chat variant.
30
30
  */
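The signature change from `Prompt` to `Pick<Prompt, 'content' | 'modelRequirements'>` (here and in the mocked tools above) means a caller no longer needs to assemble a full `Prompt` object. A sketch under the assumption that `ModelRequirements` accepts a `modelVariant` and an optional `modelName`:

```typescript
import { OpenAiExecutionTools } from '@promptbook/openai';

const openAiTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });

// Only `content` and `modelRequirements` are needed — not the other fields
// that a full Prompt presumably carries.
openAiTools
    .gptComplete({
        content: 'The capital of France is',
        modelRequirements: { modelVariant: 'COMPLETION', modelName: 'gpt-3.5-turbo-instruct' }, // assumed fields
    })
    .then((completion) => console.info(completion.modelName));
```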
@@ -10,7 +10,9 @@ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsO
10
10
  export type OpenAiExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions & {
11
11
  /**
12
12
  * A unique identifier representing your end-user, which can help OpenAI to monitor
13
- * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
13
+ * and detect abuse.
14
+ *
15
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
14
16
  */
15
17
  user?: string_token;
16
18
  };
@@ -1,13 +1,16 @@
1
1
  /**
2
- * String value found on openai page
2
+ * String value found on OpenAI and Anthropic Claude page
3
+ *
4
+ * @see https://openai.com/api/pricing/
5
+ * @see https://docs.anthropic.com/en/docs/models-overview
3
6
  *
4
7
  * @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
5
8
  */
6
- type string_openai_price = `$${number}.${number} / ${number}M tokens`;
9
+ type string_model_price = `$${number}.${number} / ${number}M tokens`;
7
10
  /**
8
11
  * Function computeUsage will create price per one token based on the string value found on openai page
9
12
  *
10
13
  * @private within the library, used only as internal helper for `OPENAI_MODELS`
11
14
  */
12
- export declare function computeUsage(value: string_openai_price): number;
15
+ export declare function computeUsage(value: string_model_price): number;
13
16
  export {};
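The renamed `string_model_price` template-literal type pins the accepted format to strings like `'$15.00 / 1M tokens'`, and `computeUsage` converts such a string into a price per single token. The implementation is not part of this diff, so the following is only a sketch of the arithmetic it presumably performs, with a locally declared type mirroring the private one above:

```typescript
type ModelPriceString = `$${number}.${number} / ${number}M tokens`;

// '$15.00 / 1M tokens'  ->  15.00 / (1 * 1_000_000)  =  0.000015 USD per token
function computeUsageSketch(value: ModelPriceString): number {
    const match = /^\$(\d+\.\d+) \/ (\d+)M tokens$/.exec(value);
    if (match === null) {
        throw new Error(`Unexpected price format: ${value}`);
    }
    const [, usd, millionsOfTokens] = match;
    return Number(usd) / (Number(millionsOfTokens) * 1_000_000);
}

console.info(computeUsageSketch('$15.00 / 1M tokens')); // 0.000015
```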
@@ -16,7 +16,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * TODO: [🧠] Some mechanism to propagate unsureness
19
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
19
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
20
21
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
21
22
  * @see https://openai.com/api/pricing/
22
23
  * @see /other/playground/playground.ts
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-client",
3
- "version": "0.52.0-1",
3
+ "version": "0.52.0-3",
4
4
  "description": "Library to supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  }
48
48
  ],
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.52.0-1"
50
+ "@promptbook/core": "0.52.0-3"
51
51
  },
52
52
  "main": "./umd/index.umd.js",
53
53
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -145,7 +145,7 @@
145
145
  return __awaiter(this, void 0, void 0, function () {
146
146
  return __generator(this, function (_a) {
147
147
  return [2 /*return*/, [
148
- /* !!!!! */
148
+ /* !!! */
149
149
  ]];
150
150
  });
151
151
  });
@@ -0,0 +1,3 @@
1
+ import { AnthropicClaudeExecutionTools } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools';
2
+ import { AnthropicClaudeExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
3
+ export { AnthropicClaudeExecutionTools, AnthropicClaudeExecutionToolsOptions };
@@ -0,0 +1,3 @@
1
+ import { AzureOpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools';
2
+ import { AzureOpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions';
3
+ export { AzureOpenAiExecutionTools, AzureOpenAiExecutionToolsOptions };
@@ -11,9 +11,10 @@ import { createPromptbookLibraryFromSources } from '../library/constructors/crea
11
11
  import { createPromptbookSublibrary } from '../library/constructors/createPromptbookSublibrary';
12
12
  import { ExecutionTypes } from '../types/ExecutionTypes';
13
13
  import { PROMPTBOOK_VERSION } from '../version';
14
+ import { MultipleLlmExecutionTools } from '../execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools';
14
15
  export { ExecutionTypes, PROMPTBOOK_VERSION };
15
16
  export { createPromptbookLibraryFromPromise, createPromptbookLibraryFromSources, createPromptbookSublibrary, SimplePromptbookLibrary, };
16
17
  export { SimplePromptInterfaceTools };
17
18
  export { promptbookStringToJson, promptbookJsonToString, validatePromptbookJson };
18
- export { createPromptbookExecutor };
19
+ export { createPromptbookExecutor, MultipleLlmExecutionTools };
19
20
  export { CallbackInterfaceTools, CallbackInterfaceToolsOptions };
@@ -1,4 +1,4 @@
1
- import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
1
+ import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
2
2
  import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
3
3
  import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
4
4
  export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };
@@ -23,7 +23,7 @@ export type PromptCommonResult = {
23
23
  /**
24
24
  * Name of the model used to generate the response
25
25
  */
26
- readonly model: string_model_name;
26
+ readonly modelName: string_model_name;
27
27
  /**
28
28
  * Timing
29
29
  */
@@ -54,11 +54,11 @@ export type PromptCommonResult = {
54
54
  /**
55
55
  * Number of tokens used in the input aka. `prompt_tokens`
56
56
  */
57
- inputTokens: number_tokens;
57
+ inputTokens: number_tokens | 'UNKNOWN';
58
58
  /**
59
59
  * Number of tokens used in the output aka. `completion_tokens`
60
60
  */
61
- outputTokens: number_tokens;
61
+ outputTokens: number_tokens | 'UNKNOWN';
62
62
  };
63
63
  /**
64
64
  * Raw response from the model
@@ -0,0 +1,42 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Anthropic Claude API.
7
+ */
8
+ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * Anthropic Claude API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Anthropic Claude Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the Anthropic Claude client
18
+ */
19
+ constructor(options: AnthropicClaudeExecutionToolsOptions);
20
+ /**
21
+ * Calls Anthropic Claude API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Anthropic Claude API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Default model for chat variant.
30
+ */
31
+ private getDefaultChatModel;
32
+ /**
33
+ * List all available Anthropic Claude models that can be used
34
+ */
35
+ listModels(): Array<AvailableModel>;
36
+ }
37
+ /**
38
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
39
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
40
+ * TODO: Maybe Create some common util for gptChat and gptComplete
41
+ * TODO: Maybe make custom OpenaiError
42
+ */
@@ -0,0 +1,8 @@
1
+ import type { ClientOptions } from '@anthropic-ai/sdk';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AnthropicClaudeExecutionTools
5
+ *
6
+ * This extends Anthropic's `ClientOptions`, which are directly passed to the Anthropic client.
7
+ */
8
+ export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
@@ -0,0 +1,20 @@
1
+ import { number_usd } from '../../../../types/typeAliases';
2
+ import type { AvailableModel } from '../../../LlmExecutionTools';
3
+ /**
4
+ * List of available Anthropic Claude models with pricing
5
+ *
6
+ * Note: Done at 2024-05-25
7
+ *
8
+ * @see https://docs.anthropic.com/en/docs/models-overview
9
+ */
10
+ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
11
+ pricing?: {
12
+ prompt: number_usd;
13
+ output: number_usd;
14
+ };
15
+ }>;
16
+ /**
17
+ * TODO: [🧠] Some mechanism to propagate unsureness
18
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
19
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ */
@@ -0,0 +1,41 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Azure OpenAI API.
7
+ */
8
+ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * OpenAI Azure API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Azure OpenAI Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the OpenAI client
18
+ */
19
+ constructor(options: AzureOpenAiExecutionToolsOptions);
20
+ /**
21
+ * Calls Azure OpenAI API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Azure OpenAI API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Changes an Azure error (which is not a proper Error but a plain object) into a proper Error
30
+ */
31
+ private transformAzureError;
32
+ /**
33
+ * List all available Azure OpenAI models that can be used
34
+ */
35
+ listModels(): Promise<Array<AvailableModel>>;
36
+ }
37
+ /**
38
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
39
+ * TODO: Maybe Create some common util for gptChat and gptComplete
40
+ * TODO: Maybe make custom AzureOpenaiError
41
+ */
@@ -0,0 +1,34 @@
1
+ import { string_name, string_token } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AzureOpenAiExecutionTools
5
+ *
6
+ * @see https://oai.azure.com/portal/
7
+ */
8
+ export type AzureOpenAiExecutionToolsOptions = CommonExecutionToolsOptions & {
9
+ /**
10
+ * The resource name of the Azure OpenAI resource
11
+ *
12
+ * Note: Typically you have one resource and multiple deployments.
13
+ */
14
+ resourceName: string_name;
15
+ /**
16
+ * The deployment name
17
+ *
18
+ * Note: If you specify modelName in prompt, it will be used instead of deploymentName
19
+ * Note: This is kind of a modelName in OpenAI terms
20
+ * Note: Typically you have one resource and multiple deployments.
21
+ */
22
+ deploymentName: string_name;
23
+ /**
24
+ * The API key of the Azure OpenAI resource
25
+ */
26
+ apiKey: string_token;
27
+ /**
28
+ * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
29
+ * and detect abuse.
30
+ *
31
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
32
+ */
33
+ user?: string_token;
34
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
11
11
  /**
12
12
  * Mocks chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
15
15
  /**
16
16
  * Mocks completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available mocked-models that can be used
21
21
  */
@@ -11,11 +11,11 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
11
11
  /**
12
12
  * Fakes chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
15
15
  /**
16
16
  * Fakes completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available fake-models that can be used
21
21
  */
@@ -0,0 +1,35 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ /**
5
+ * Multiple LLM Execution Tools is a proxy that uses multiple execution tools internally and exposes the single executor interface externally.
6
+ *
7
+ * @see https://github.com/webgptorg/promptbook#multiple-server
8
+ */
9
+ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
10
+ /**
11
+ * Array of execution tools in order of priority
12
+ */
13
+ private llmExecutionTools;
14
+ /**
15
+ * Gets array of execution tools in order of priority
16
+ */
17
+ constructor(...llmExecutionTools: Array<LlmExecutionTools>);
18
+ /**
19
+ * Calls the best available chat model
20
+ */
21
+ gptChat(prompt: Prompt): Promise<PromptChatResult>;
22
+ /**
23
+ * Calls the best available completion model
24
+ */
25
+ gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
26
+ /**
27
+ * Calls the best available model
28
+ */
29
+ private gptCommon;
30
+ /**
31
+ * List all available models that can be used
32
+ * This list is a combination of all available models from all execution tools
33
+ */
34
+ listModels(): Promise<Array<AvailableModel>>;
35
+ }
@@ -0,0 +1,23 @@
1
+ import type { client_id, string_uri } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for MultipleLlmExecutionTools
5
+ */
6
+ export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
7
+ /**
8
+ * URL of the multiple PROMPTBOOK server
9
+ * The socket.io client will connect to the server at this URL
10
+ */
11
+ readonly multipleUrl: URL;
12
+ /**
13
+ * Path for the Socket.io server to listen
14
+ *
15
+ * @default '/socket.io'
16
+ * @example '/promptbook/socket.io'
17
+ */
18
+ readonly path: string_uri;
19
+ /**
20
+ * Your client ID
21
+ */
22
+ readonly clientId: client_id;
23
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -10,7 +10,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
10
10
  /**
11
11
  * OpenAI API client.
12
12
  */
13
- private readonly openai;
13
+ private readonly client;
14
14
  /**
15
15
  * Creates OpenAI Execution Tools.
16
16
  *
@@ -20,11 +20,11 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  /**
21
21
  * Calls OpenAI API to use a chat model.
22
22
  */
23
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
24
  /**
25
25
  * Calls OpenAI API to use a complete model.
26
26
  */
27
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
28
  /**
29
29
  * Default model for chat variant.
30
30
  */
@@ -10,7 +10,9 @@ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsO
10
10
  export type OpenAiExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions & {
11
11
  /**
12
12
  * A unique identifier representing your end-user, which can help OpenAI to monitor
13
- * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
13
+ * and detect abuse.
14
+ *
15
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
14
16
  */
15
17
  user?: string_token;
16
18
  };
@@ -1,13 +1,16 @@
1
1
  /**
2
- * String value found on openai page
2
+ * String value found on OpenAI and Anthropic Claude page
3
+ *
4
+ * @see https://openai.com/api/pricing/
5
+ * @see https://docs.anthropic.com/en/docs/models-overview
3
6
  *
4
7
  * @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
5
8
  */
6
- type string_openai_price = `$${number}.${number} / ${number}M tokens`;
9
+ type string_model_price = `$${number}.${number} / ${number}M tokens`;
7
10
  /**
8
11
  * Function computeUsage will create price per one token based on the string value found on openai page
9
12
  *
10
13
  * @private within the library, used only as internal helper for `OPENAI_MODELS`
11
14
  */
12
- export declare function computeUsage(value: string_openai_price): number;
15
+ export declare function computeUsage(value: string_model_price): number;
13
16
  export {};
@@ -16,7 +16,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * TODO: [🧠] Some mechanism to propagate unsureness
19
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
19
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
20
21
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
21
22
  * @see https://openai.com/api/pricing/
22
23
  * @see /other/playground/playground.ts
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};