@promptbook/remote-client 0.52.0-1 → 0.52.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +4 -3
  2. package/esm/index.es.js +1 -1
  3. package/esm/typings/_packages/anthropic-claude.index.d.ts +3 -0
  4. package/esm/typings/_packages/azure-openai.index.d.ts +3 -0
  5. package/esm/typings/_packages/openai.index.d.ts +1 -1
  6. package/esm/typings/execution/PromptResult.d.ts +3 -3
  7. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +41 -0
  8. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  9. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  10. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  11. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +37 -0
  12. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  13. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  14. package/esm/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  15. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  16. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  17. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  18. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  19. package/esm/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  20. package/esm/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  21. package/esm/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
  22. package/package.json +2 -2
  23. package/umd/index.umd.js +1 -1
  24. package/umd/typings/_packages/anthropic-claude.index.d.ts +3 -0
  25. package/umd/typings/_packages/azure-openai.index.d.ts +3 -0
  26. package/umd/typings/_packages/openai.index.d.ts +1 -1
  27. package/umd/typings/execution/PromptResult.d.ts +3 -3
  28. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +41 -0
  29. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  30. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  31. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  32. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +37 -0
  33. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  34. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  35. package/umd/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  36. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  37. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  38. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  39. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  40. package/umd/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  41. package/umd/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  42. package/umd/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
package/README.md CHANGED
@@ -329,7 +329,7 @@ Or you can install them separately:
329
329
  - _(Not finished)_ **[@promptbook/wizzard](https://www.npmjs.com/package/@promptbook/wizzard)** - Wizard for creating+running promptbooks in single line
330
330
  - **[@promptbook/execute-javascript](https://www.npmjs.com/package/@promptbook/execute-javascript)** - Execution tools for javascript inside promptbooks
331
331
  - **[@promptbook/openai](https://www.npmjs.com/package/@promptbook/openai)** - Execution tools for OpenAI API, wrapper around OpenAI SDK
332
- - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
332
+ - **[@promptbook/anthropic-claude](https://www.npmjs.com/package/@promptbook/anthropic-claude)** - Execution tools for Anthropic Claude API, wrapper around Anthropic Claude SDK
333
333
  - **[@promptbook/azure-openai](https://www.npmjs.com/package/@promptbook/azure-openai)** - Execution tools for Azure OpenAI API
334
334
  - **[@promptbook/langtail](https://www.npmjs.com/package/@promptbook/langtail)** - Execution tools for Langtail API, wrapper around Langtail SDK
335
335
  - **[@promptbook/mock](https://www.npmjs.com/package/@promptbook/mock)** - Mocked execution tools for testing the library and saving the tokens
@@ -482,8 +482,9 @@ Internally it calls OpenAI, Azure, GPU, proxy, cache, logging,...
482
482
  `LlmExecutionTools` an abstract interface that is implemented by concrete execution tools:
483
483
 
484
484
  - `OpenAiExecutionTools`
485
- - _(Not implemented yet !!!!! )_ `AnthropicClaudeExecutionTools`
486
- - _(Not implemented yet !!!!! )_ `AzureOpenAiExecutionTools`
485
+ - `AnthropicClaudeExecutionTools`
486
+ - `AzureOpenAiExecutionTools`
487
+ - `LangtailExecutionTools`
487
488
  - _(Not implemented yet)_ `BardExecutionTools`
488
489
  - _(Not implemented yet)_ `LamaExecutionTools`
489
490
  - _(Not implemented yet)_ `GpuExecutionTools`
package/esm/index.es.js CHANGED
@@ -141,7 +141,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
141
141
  return __awaiter(this, void 0, void 0, function () {
142
142
  return __generator(this, function (_a) {
143
143
  return [2 /*return*/, [
144
- /* !!!!! */
144
+ /* !!! */
145
145
  ]];
146
146
  });
147
147
  });
@@ -0,0 +1,3 @@
1
+ import { AnthropicClaudeExecutionTools } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools';
2
+ import { AnthropicClaudeExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
3
+ export { AnthropicClaudeExecutionTools, AnthropicClaudeExecutionToolsOptions };
@@ -0,0 +1,3 @@
1
+ import { AzureOpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools';
2
+ import { AzureOpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions';
3
+ export { AzureOpenAiExecutionTools, AzureOpenAiExecutionToolsOptions };
@@ -1,4 +1,4 @@
1
- import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
1
+ import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
2
2
  import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
3
3
  import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
4
4
  export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };
@@ -23,7 +23,7 @@ export type PromptCommonResult = {
23
23
  /**
24
24
  * Name of the model used to generate the response
25
25
  */
26
- readonly model: string_model_name;
26
+ readonly modelName: string_model_name;
27
27
  /**
28
28
  * Timing
29
29
  */
@@ -54,11 +54,11 @@ export type PromptCommonResult = {
54
54
  /**
55
55
  * Number of tokens used in the input aka. `prompt_tokens`
56
56
  */
57
- inputTokens: number_tokens;
57
+ inputTokens: number_tokens | 'UNKNOWN';
58
58
  /**
59
59
  * Number of tokens used in the output aka. `completion_tokens`
60
60
  */
61
- outputTokens: number_tokens;
61
+ outputTokens: number_tokens | 'UNKNOWN';
62
62
  };
63
63
  /**
64
64
  * Raw response from the model
@@ -0,0 +1,41 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Anthropic Claude API.
7
+ */
8
+ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * Anthropic Claude API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Anthropic Claude Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the Anthropic Claude client
18
+ */
19
+ constructor(options: AnthropicClaudeExecutionToolsOptions);
20
+ /**
21
+ * Calls Anthropic Claude API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Anthropic Claude API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Default model for chat variant.
30
+ */
31
+ private getDefaultChatModel;
32
+ /**
33
+ * List all available Anthropic Claude models that can be used
34
+ */
35
+ listModels(): Array<AvailableModel>;
36
+ }
37
+ /**
38
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
39
+ * TODO: Maybe Create some common util for gptChat and gptComplete
40
+ * TODO: Maybe make custom OpenaiError
41
+ */
@@ -0,0 +1,8 @@
1
+ import type { ClientOptions } from '@anthropic-ai/sdk';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AnthropicClaudeExecutionTools
5
+ *
6
+ * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
7
+ */
8
+ export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
@@ -0,0 +1,20 @@
1
+ import { number_usd } from '../../../../types/typeAliases';
2
+ import type { AvailableModel } from '../../../LlmExecutionTools';
3
+ /**
4
+ * List of available Anthropic Claude models with pricing
5
+ *
6
+ * Note: Done at 2024-05-25
7
+ *
8
+ * @see https://docs.anthropic.com/en/docs/models-overview
9
+ */
10
+ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
11
+ pricing?: {
12
+ prompt: number_usd;
13
+ output: number_usd;
14
+ };
15
+ }>;
16
+ /**
17
+ * TODO: [🧠] Some mechanism to propagate unsureness
18
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
19
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ */
@@ -0,0 +1,37 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Azure OpenAI API.
7
+ */
8
+ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * OpenAI Azure API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates OpenAI Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the OpenAI client
18
+ */
19
+ constructor(options: AzureOpenAiExecutionToolsOptions);
20
+ /**
21
+ * Calls OpenAI API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Azure OpenAI API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * List all available Azure OpenAI models that can be used
30
+ */
31
+ listModels(): Promise<Array<AvailableModel>>;
32
+ }
33
+ /**
34
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
35
+ * TODO: Maybe Create some common util for gptChat and gptComplete
36
+ * TODO: Maybe make custom AzureOpenaiError
37
+ */
@@ -0,0 +1,34 @@
1
+ import { string_name, string_token } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AzureOpenAiExecutionTools
5
+ *
6
+ * @see https://oai.azure.com/portal/
7
+ */
8
+ export type AzureOpenAiExecutionToolsOptions = CommonExecutionToolsOptions & {
9
+ /**
10
+ * The resource name of the Azure OpenAI resource
11
+ *
12
+ * Note: Typically you have one resource and multiple deployments.
13
+ */
14
+ resourceName: string_name;
15
+ /**
16
+ * The deployment name
17
+ *
18
+ * Note: If you specify modelName in prompt, it will be used instead of deploymentName
19
+ * Note: This is kind of a modelName in OpenAI terms
20
+ * Note: Typically you have one resource and multiple deployments.
21
+ */
22
+ deploymentName: string_name;
23
+ /**
24
+ * The API key of the Azure OpenAI resource
25
+ */
26
+ apiKey: string_token;
27
+ /**
28
+ * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
29
+ * and detect abuse.
30
+ *
31
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
32
+ */
33
+ user?: string_token;
34
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
11
11
  /**
12
12
  * Mocks chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
15
15
  /**
16
16
  * Mocks completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available mocked-models that can be used
21
21
  */
@@ -11,11 +11,11 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
11
11
  /**
12
12
  * Fakes chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
15
15
  /**
16
16
  * Fakes completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available fake-models that can be used
21
21
  */
@@ -10,7 +10,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
10
10
  /**
11
11
  * OpenAI API client.
12
12
  */
13
- private readonly openai;
13
+ private readonly client;
14
14
  /**
15
15
  * Creates OpenAI Execution Tools.
16
16
  *
@@ -20,11 +20,11 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  /**
21
21
  * Calls OpenAI API to use a chat model.
22
22
  */
23
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
24
  /**
25
25
  * Calls OpenAI API to use a complete model.
26
26
  */
27
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
28
  /**
29
29
  * Default model for chat variant.
30
30
  */
@@ -10,7 +10,9 @@ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsO
10
10
  export type OpenAiExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions & {
11
11
  /**
12
12
  * A unique identifier representing your end-user, which can help OpenAI to monitor
13
- * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
13
+ * and detect abuse.
14
+ *
15
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
14
16
  */
15
17
  user?: string_token;
16
18
  };
@@ -1,13 +1,16 @@
1
1
  /**
2
- * String value found on openai page
2
+ * String value found on OpenAI and Anthropic Claude page
3
+ *
4
+ * @see https://openai.com/api/pricing/
5
+ * @see https://docs.anthropic.com/en/docs/models-overview
3
6
  *
4
7
  * @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
5
8
  */
6
- type string_openai_price = `$${number}.${number} / ${number}M tokens`;
9
+ type string_model_price = `$${number}.${number} / ${number}M tokens`;
7
10
  /**
8
11
  * Function computeUsage will create price per one token based on the string value found on openai page
9
12
  *
10
13
  * @private within the library, used only as internal helper for `OPENAI_MODELS`
11
14
  */
12
- export declare function computeUsage(value: string_openai_price): number;
15
+ export declare function computeUsage(value: string_model_price): number;
13
16
  export {};
@@ -16,7 +16,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * TODO: [🧠] Some mechanism to propagate unsureness
19
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
19
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
20
21
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
21
22
  * @see https://openai.com/api/pricing/
22
23
  * @see /other/playground/playground.ts
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-client",
3
- "version": "0.52.0-1",
3
+ "version": "0.52.0-2",
4
4
  "description": "Library to supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  }
48
48
  ],
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.52.0-1"
50
+ "@promptbook/core": "0.52.0-2"
51
51
  },
52
52
  "main": "./umd/index.umd.js",
53
53
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -145,7 +145,7 @@
145
145
  return __awaiter(this, void 0, void 0, function () {
146
146
  return __generator(this, function (_a) {
147
147
  return [2 /*return*/, [
148
- /* !!!!! */
148
+ /* !!! */
149
149
  ]];
150
150
  });
151
151
  });
@@ -0,0 +1,3 @@
1
+ import { AnthropicClaudeExecutionTools } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools';
2
+ import { AnthropicClaudeExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
3
+ export { AnthropicClaudeExecutionTools, AnthropicClaudeExecutionToolsOptions };
@@ -0,0 +1,3 @@
1
+ import { AzureOpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools';
2
+ import { AzureOpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions';
3
+ export { AzureOpenAiExecutionTools, AzureOpenAiExecutionToolsOptions };
@@ -1,4 +1,4 @@
1
- import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/models';
1
+ import { OPENAI_MODELS } from '../execution/plugins/llm-execution-tools/openai/openai-models';
2
2
  import { OpenAiExecutionTools } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools';
3
3
  import { OpenAiExecutionToolsOptions } from '../execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions';
4
4
  export { OPENAI_MODELS, OpenAiExecutionTools, OpenAiExecutionToolsOptions };
@@ -23,7 +23,7 @@ export type PromptCommonResult = {
23
23
  /**
24
24
  * Name of the model used to generate the response
25
25
  */
26
- readonly model: string_model_name;
26
+ readonly modelName: string_model_name;
27
27
  /**
28
28
  * Timing
29
29
  */
@@ -54,11 +54,11 @@ export type PromptCommonResult = {
54
54
  /**
55
55
  * Number of tokens used in the input aka. `prompt_tokens`
56
56
  */
57
- inputTokens: number_tokens;
57
+ inputTokens: number_tokens | 'UNKNOWN';
58
58
  /**
59
59
  * Number of tokens used in the output aka. `completion_tokens`
60
60
  */
61
- outputTokens: number_tokens;
61
+ outputTokens: number_tokens | 'UNKNOWN';
62
62
  };
63
63
  /**
64
64
  * Raw response from the model
@@ -0,0 +1,41 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Anthropic Claude API.
7
+ */
8
+ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * Anthropic Claude API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates Anthropic Claude Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the Anthropic Claude client
18
+ */
19
+ constructor(options: AnthropicClaudeExecutionToolsOptions);
20
+ /**
21
+ * Calls Anthropic Claude API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Anthropic Claude API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Default model for chat variant.
30
+ */
31
+ private getDefaultChatModel;
32
+ /**
33
+ * List all available Anthropic Claude models that can be used
34
+ */
35
+ listModels(): Array<AvailableModel>;
36
+ }
37
+ /**
38
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
39
+ * TODO: Maybe Create some common util for gptChat and gptComplete
40
+ * TODO: Maybe make custom OpenaiError
41
+ */
@@ -0,0 +1,8 @@
1
+ import type { ClientOptions } from '@anthropic-ai/sdk';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AnthropicClaudeExecutionTools
5
+ *
6
+ * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
7
+ */
8
+ export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
@@ -0,0 +1,20 @@
1
+ import { number_usd } from '../../../../types/typeAliases';
2
+ import type { AvailableModel } from '../../../LlmExecutionTools';
3
+ /**
4
+ * List of available Anthropic Claude models with pricing
5
+ *
6
+ * Note: Done at 2024-05-25
7
+ *
8
+ * @see https://docs.anthropic.com/en/docs/models-overview
9
+ */
10
+ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
11
+ pricing?: {
12
+ prompt: number_usd;
13
+ output: number_usd;
14
+ };
15
+ }>;
16
+ /**
17
+ * TODO: [🧠] Some mechanism to propagate unsureness
18
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
19
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ */
@@ -0,0 +1,37 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Azure OpenAI API.
7
+ */
8
+ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * OpenAI Azure API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates OpenAI Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the OpenAI client
18
+ */
19
+ constructor(options: AzureOpenAiExecutionToolsOptions);
20
+ /**
21
+ * Calls OpenAI API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Azure OpenAI API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * List all available Azure OpenAI models that can be used
30
+ */
31
+ listModels(): Promise<Array<AvailableModel>>;
32
+ }
33
+ /**
34
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
35
+ * TODO: Maybe Create some common util for gptChat and gptComplete
36
+ * TODO: Maybe make custom AzureOpenaiError
37
+ */
@@ -0,0 +1,34 @@
1
+ import { string_name, string_token } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AzureOpenAiExecutionTools
5
+ *
6
+ * @see https://oai.azure.com/portal/
7
+ */
8
+ export type AzureOpenAiExecutionToolsOptions = CommonExecutionToolsOptions & {
9
+ /**
10
+ * The resource name of the Azure OpenAI resource
11
+ *
12
+ * Note: Typically you have one resource and multiple deployments.
13
+ */
14
+ resourceName: string_name;
15
+ /**
16
+ * The deployment name
17
+ *
18
+ * Note: If you specify modelName in prompt, it will be used instead of deploymentName
19
+ * Note: This is kind of a modelName in OpenAI terms
20
+ * Note: Typically you have one resource and multiple deployments.
21
+ */
22
+ deploymentName: string_name;
23
+ /**
24
+ * The API key of the Azure OpenAI resource
25
+ */
26
+ apiKey: string_token;
27
+ /**
28
+ * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
29
+ * and detect abuse.
30
+ *
31
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
32
+ */
33
+ user?: string_token;
34
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
11
11
  /**
12
12
  * Mocks chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
15
15
  /**
16
16
  * Mocks completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available mocked-models that can be used
21
21
  */
@@ -11,11 +11,11 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
11
11
  /**
12
12
  * Fakes chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
15
15
  /**
16
16
  * Fakes completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available fake-models that can be used
21
21
  */
@@ -10,7 +10,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
10
10
  /**
11
11
  * OpenAI API client.
12
12
  */
13
- private readonly openai;
13
+ private readonly client;
14
14
  /**
15
15
  * Creates OpenAI Execution Tools.
16
16
  *
@@ -20,11 +20,11 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  /**
21
21
  * Calls OpenAI API to use a chat model.
22
22
  */
23
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
24
  /**
25
25
  * Calls OpenAI API to use a complete model.
26
26
  */
27
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
28
  /**
29
29
  * Default model for chat variant.
30
30
  */
@@ -10,7 +10,9 @@ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsO
10
10
  export type OpenAiExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions & {
11
11
  /**
12
12
  * A unique identifier representing your end-user, which can help OpenAI to monitor
13
- * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
13
+ * and detect abuse.
14
+ *
15
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
14
16
  */
15
17
  user?: string_token;
16
18
  };
@@ -1,13 +1,16 @@
1
1
  /**
2
- * String value found on openai page
2
+ * String value found on OpenAI and Anthropic Claude page
3
+ *
4
+ * @see https://openai.com/api/pricing/
5
+ * @see https://docs.anthropic.com/en/docs/models-overview
3
6
  *
4
7
  * @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
5
8
  */
6
- type string_openai_price = `$${number}.${number} / ${number}M tokens`;
9
+ type string_model_price = `$${number}.${number} / ${number}M tokens`;
7
10
  /**
8
11
  * Function computeUsage will create price per one token based on the string value found on openai page
9
12
  *
10
13
  * @private within the library, used only as internal helper for `OPENAI_MODELS`
11
14
  */
12
- export declare function computeUsage(value: string_openai_price): number;
15
+ export declare function computeUsage(value: string_model_price): number;
13
16
  export {};
@@ -16,7 +16,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * TODO: [🧠] Some mechanism to propagate unsureness
19
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
19
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
20
21
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
21
22
  * @see https://openai.com/api/pricing/
22
23
  * @see /other/playground/playground.ts
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};