@promptbook/openai 0.52.0-0 → 0.52.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62) hide show
  1. package/README.md +61 -6
  2. package/esm/index.es.js +16 -17
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/_packages/anthropic-claude.index.d.ts +3 -0
  5. package/esm/typings/_packages/azure-openai.index.d.ts +3 -0
  6. package/esm/typings/_packages/core.index.d.ts +13 -5
  7. package/esm/typings/_packages/openai.index.d.ts +1 -1
  8. package/esm/typings/_packages/utils.index.d.ts +3 -8
  9. package/esm/typings/execution/PromptResult.d.ts +3 -3
  10. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +42 -0
  11. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  12. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  13. package/esm/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  14. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +41 -0
  15. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  16. package/esm/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  17. package/esm/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  18. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  19. package/esm/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  20. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  21. package/esm/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  22. package/esm/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  23. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  24. package/esm/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  25. package/esm/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  26. package/esm/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  27. package/esm/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
  28. package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  29. package/esm/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +4 -17
  30. package/esm/typings/library/constructors/createPromptbookLibraryFromDirectory.d.ts +16 -1
  31. package/package.json +2 -2
  32. package/umd/index.umd.js +16 -17
  33. package/umd/index.umd.js.map +1 -1
  34. package/umd/typings/_packages/anthropic-claude.index.d.ts +3 -0
  35. package/umd/typings/_packages/azure-openai.index.d.ts +3 -0
  36. package/umd/typings/_packages/core.index.d.ts +13 -5
  37. package/umd/typings/_packages/openai.index.d.ts +1 -1
  38. package/umd/typings/_packages/utils.index.d.ts +3 -8
  39. package/umd/typings/execution/PromptResult.d.ts +3 -3
  40. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +42 -0
  41. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +8 -0
  42. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/anthropic-claude-models.d.ts +20 -0
  43. package/umd/typings/execution/plugins/llm-execution-tools/anthropic-claude/playground/playground.d.ts +2 -0
  44. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionTools.d.ts +41 -0
  45. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +34 -0
  46. package/umd/typings/execution/plugins/llm-execution-tools/azure-openai/playground/playground.d.ts +2 -0
  47. package/umd/typings/execution/plugins/llm-execution-tools/langtail/playground/playground.d.ts +2 -0
  48. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
  49. package/umd/typings/execution/plugins/llm-execution-tools/mocked/MockedFackedLlmExecutionTools.d.ts +2 -2
  50. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionTools.d.ts +35 -0
  51. package/umd/typings/execution/plugins/llm-execution-tools/multiple/MultipleLlmExecutionToolsOptions.d.ts +23 -0
  52. package/umd/typings/execution/plugins/llm-execution-tools/multiple/playground/playground.d.ts +2 -0
  53. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionTools.d.ts +3 -3
  54. package/umd/typings/execution/plugins/llm-execution-tools/openai/OpenAiExecutionToolsOptions.d.ts +3 -1
  55. package/umd/typings/execution/plugins/llm-execution-tools/openai/computeUsage.d.ts +6 -3
  56. package/umd/typings/execution/plugins/llm-execution-tools/openai/{models.d.ts → openai-models.d.ts} +2 -1
  57. package/umd/typings/execution/plugins/llm-execution-tools/openai/playground/playground.d.ts +2 -0
  58. package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  59. package/umd/typings/execution/plugins/script-execution-tools/javascript/JavascriptExecutionTools.d.ts +4 -17
  60. package/umd/typings/library/constructors/createPromptbookLibraryFromDirectory.d.ts +16 -1
  61. package/esm/typings/_packages/wizzard.index.d.ts +0 -5
  62. package/umd/typings/_packages/wizzard.index.d.ts +0 -5
@@ -0,0 +1,8 @@
1
+ import type { ClientOptions } from '@anthropic-ai/sdk';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AnthropicClaudeExecutionTools
5
+ *
6
+ * This extends Anthropic's `ClientOptions` which are directly passed to the Anthropic client.
7
+ */
8
+ export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
@@ -0,0 +1,20 @@
1
+ import { number_usd } from '../../../../types/typeAliases';
2
+ import type { AvailableModel } from '../../../LlmExecutionTools';
3
+ /**
4
+ * List of available Anthropic Claude models with pricing
5
+ *
6
+ * Note: Done at 2024-05-25
7
+ *
8
+ * @see https://docs.anthropic.com/en/docs/models-overview
9
+ */
10
+ export declare const ANTHROPIC_CLAUDE_MODELS: Array<AvailableModel & {
11
+ pricing?: {
12
+ prompt: number_usd;
13
+ output: number_usd;
14
+ };
15
+ }>;
16
+ /**
17
+ * TODO: [🧠] Some mechanism to propagate unsureness
18
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
19
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ */
@@ -0,0 +1,41 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ import type { AzureOpenAiExecutionToolsOptions } from './AzureOpenAiExecutionToolsOptions';
5
+ /**
6
+ * Execution Tools for calling Azure OpenAI API.
7
+ */
8
+ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
9
+ private readonly options;
10
+ /**
11
+ * OpenAI Azure API client.
12
+ */
13
+ private readonly client;
14
+ /**
15
+ * Creates OpenAI Execution Tools.
16
+ *
17
+ * @param options which are relevant are directly passed to the OpenAI client
18
+ */
19
+ constructor(options: AzureOpenAiExecutionToolsOptions);
20
+ /**
21
+ * Calls OpenAI API to use a chat model.
22
+ */
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
+ /**
25
+ * Calls Azure OpenAI API to use a complete model.
26
+ */
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
+ /**
29
+ * Changes Azure error (which is not a proper Error but an object) to a proper Error
30
+ */
31
+ private transformAzureError;
32
+ /**
33
+ * List all available Azure OpenAI models that can be used
34
+ */
35
+ listModels(): Promise<Array<AvailableModel>>;
36
+ }
37
+ /**
38
+ * TODO: [🍓][♐] Allow to list compatible models with each variant
39
+ * TODO: Maybe Create some common util for gptChat and gptComplete
40
+ * TODO: Maybe make custom AzureOpenaiError
41
+ */
@@ -0,0 +1,34 @@
1
+ import { string_name, string_token } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for AzureOpenAiExecutionTools
5
+ *
6
+ * @see https://oai.azure.com/portal/
7
+ */
8
+ export type AzureOpenAiExecutionToolsOptions = CommonExecutionToolsOptions & {
9
+ /**
10
+ * The resource name of the Azure OpenAI resource
11
+ *
12
+ * Note: Typically you have one resource and multiple deployments.
13
+ */
14
+ resourceName: string_name;
15
+ /**
16
+ * The deployment name
17
+ *
18
+ * Note: If you specify modelName in prompt, it will be used instead of deploymentName
19
+ * Note: This is kind of a modelName in OpenAI terms
20
+ * Note: Typically you have one resource and multiple deployments.
21
+ */
22
+ deploymentName: string_name;
23
+ /**
24
+ * The API key of the Azure OpenAI resource
25
+ */
26
+ apiKey: string_token;
27
+ /**
28
+ * A unique identifier representing your end-user, which can help Azure OpenAI to monitor
29
+ * and detect abuse.
30
+ *
31
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids (document from OpenAI not Azure, but same concept)
32
+ */
33
+ user?: string_token;
34
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
11
11
  /**
12
12
  * Mocks chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
15
15
  /**
16
16
  * Mocks completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available mocked-models that can be used
21
21
  */
@@ -11,11 +11,11 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
11
11
  /**
12
12
  * Fakes chat model
13
13
  */
14
- gptChat(prompt: Prompt): Promise<PromptChatResult & PromptCompletionResult>;
14
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
15
15
  /**
16
16
  * Fakes completion model
17
17
  */
18
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
18
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
19
19
  /**
20
20
  * List all available fake-models that can be used
21
21
  */
@@ -0,0 +1,35 @@
1
+ import type { Prompt } from '../../../../types/Prompt';
2
+ import type { AvailableModel, LlmExecutionTools } from '../../../LlmExecutionTools';
3
+ import type { PromptChatResult, PromptCompletionResult } from '../../../PromptResult';
4
+ /**
5
+ * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
6
+ *
7
+ * @see https://github.com/webgptorg/promptbook#multiple-server
8
+ */
9
+ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
10
+ /**
11
+ * Array of execution tools in order of priority
12
+ */
13
+ private llmExecutionTools;
14
+ /**
15
+ * Gets array of execution tools in order of priority
16
+ */
17
+ constructor(...llmExecutionTools: Array<LlmExecutionTools>);
18
+ /**
19
+ * Calls the best available chat model
20
+ */
21
+ gptChat(prompt: Prompt): Promise<PromptChatResult>;
22
+ /**
23
+ * Calls the best available completion model
24
+ */
25
+ gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
26
+ /**
27
+ * Calls the best available model
28
+ */
29
+ private gptCommon;
30
+ /**
31
+ * List all available models that can be used
32
+ * This list is a combination of all available models from all execution tools
33
+ */
34
+ listModels(): Promise<Array<AvailableModel>>;
35
+ }
@@ -0,0 +1,23 @@
1
+ import type { client_id, string_uri } from '../../../../types/typeAliases';
2
+ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsOptions';
3
+ /**
4
+ * Options for MultipleLlmExecutionTools
5
+ */
6
+ export type MultipleLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
7
+ /**
8
+ * URL of the multiple PROMPTBOOK server
9
+ * On this server will be connected to the socket.io server
10
+ */
11
+ readonly multipleUrl: URL;
12
+ /**
13
+ * Path for the Socket.io server to listen
14
+ *
15
+ * @default '/socket.io'
16
+ * @example '/promptbook/socket.io'
17
+ */
18
+ readonly path: string_uri;
19
+ /**
20
+ * Your client ID
21
+ */
22
+ readonly clientId: client_id;
23
+ };
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -10,7 +10,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
10
10
  /**
11
11
  * OpenAI API client.
12
12
  */
13
- private readonly openai;
13
+ private readonly client;
14
14
  /**
15
15
  * Creates OpenAI Execution Tools.
16
16
  *
@@ -20,11 +20,11 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
20
20
  /**
21
21
  * Calls OpenAI API to use a chat model.
22
22
  */
23
- gptChat(prompt: Prompt): Promise<PromptChatResult>;
23
+ gptChat(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
24
24
  /**
25
25
  * Calls OpenAI API to use a complete model.
26
26
  */
27
- gptComplete(prompt: Prompt): Promise<PromptCompletionResult>;
27
+ gptComplete(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
28
28
  /**
29
29
  * Default model for chat variant.
30
30
  */
@@ -10,7 +10,9 @@ import type { CommonExecutionToolsOptions } from '../../../CommonExecutionToolsO
10
10
  export type OpenAiExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions & {
11
11
  /**
12
12
  * A unique identifier representing your end-user, which can help OpenAI to monitor
13
- * and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
13
+ * and detect abuse.
14
+ *
15
+ * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
14
16
  */
15
17
  user?: string_token;
16
18
  };
@@ -1,13 +1,16 @@
1
1
  /**
2
- * String value found on openai page
2
+ * String value found on OpenAI and Anthropic Claude page
3
+ *
4
+ * @see https://openai.com/api/pricing/
5
+ * @see https://docs.anthropic.com/en/docs/models-overview
3
6
  *
4
7
  * @private within the library, used only as internal helper for `OPENAI_MODELS` and `computeUsage`
5
8
  */
6
- type string_openai_price = `$${number}.${number} / ${number}M tokens`;
9
+ type string_model_price = `$${number}.${number} / ${number}M tokens`;
7
10
  /**
8
11
  * Function computeUsage will create price per one token based on the string value found on openai page
9
12
  *
10
13
  * @private within the library, used only as internal helper for `OPENAI_MODELS`
11
14
  */
12
- export declare function computeUsage(value: string_openai_price): number;
15
+ export declare function computeUsage(value: string_model_price): number;
13
16
  export {};
@@ -16,7 +16,8 @@ export declare const OPENAI_MODELS: Array<AvailableModel & {
16
16
  }>;
17
17
  /**
18
18
  * TODO: [🧠] Some mechanism to propagate unsureness
19
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
19
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
20
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
20
21
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
21
22
  * @see https://openai.com/api/pricing/
22
23
  * @see /other/playground/playground.ts
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -8,7 +8,7 @@ import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptio
8
8
  */
9
9
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
10
10
  private readonly options;
11
- constructor(options: JavascriptExecutionToolsOptions);
11
+ constructor(options?: JavascriptExecutionToolsOptions);
12
12
  /**
13
13
  * Executes a JavaScript
14
14
  */
@@ -1,20 +1,7 @@
1
- import { ScriptExecutionTools, ScriptExecutionToolsExecuteOptions } from '../../../ScriptExecutionTools';
2
- import { JavascriptExecutionToolsOptions } from './JavascriptExecutionToolsOptions';
1
+ import { JavascriptEvalExecutionTools } from './JavascriptEvalExecutionTools';
3
2
  /**
4
- * ScriptExecutionTools for JavaScript implemented via vm2
3
+ * Placeholder for a better implementation of JavascriptExecutionTools - some proper sandboxing
5
4
  *
6
- * Warning: This is not implemented yet
7
- */
8
- export declare class JavascriptExecutionTools implements ScriptExecutionTools {
9
- private readonly options;
10
- constructor(options: JavascriptExecutionToolsOptions);
11
- /**
12
- * Executes a JavaScript
13
- */
14
- execute(options: ScriptExecutionToolsExecuteOptions): Promise<string>;
15
- }
16
- /**
17
- * TODO: !! Pass isVerbose to constructor and use it
18
- * TODO: !! Probbably make some common util createStatementToEvaluate
19
- * TODO: !! Implement via vm2
5
+ * @alias JavascriptExecutionTools
20
6
  */
7
+ export declare const JavascriptExecutionTools: typeof JavascriptEvalExecutionTools;
@@ -1,4 +1,16 @@
1
+ import { string_folder_path } from '../../types/typeAliases';
1
2
  import { PromptbookLibrary } from '../PromptbookLibrary';
3
+ /**
4
+ * Options for `createPromptbookLibraryFromDirectory` function
5
+ */
6
+ type CreatePromptbookLibraryFromDirectoryOptions = {
7
+ /**
8
+ * If true, the directory is searched recursively for promptbooks
9
+ *
10
+ * @default true
11
+ */
12
+ isRecursive?: boolean;
13
+ };
2
14
  /**
3
15
  * Constructs Promptbook from given directory
4
16
  *
@@ -6,9 +18,12 @@ import { PromptbookLibrary } from '../PromptbookLibrary';
6
18
  * Note: The function does NOT return a promise; it returns the library directly, which dynamically loads promptbooks when needed
7
19
  * SO during the construction syntax and logic sources IS NOT validated
8
20
  *
21
+ * @param path - path to the directory with promptbooks
22
+ * @param options - Misc options for the library
9
23
  * @returns PromptbookLibrary
10
24
  */
11
- export declare function createPromptbookLibraryFromDirectory(): PromptbookLibrary;
25
+ export declare function createPromptbookLibraryFromDirectory(path: string_folder_path, options?: CreatePromptbookLibraryFromDirectoryOptions): PromptbookLibrary;
26
+ export {};
12
27
  /***
13
28
  * TODO: [🍓][🚯] !!! Add to README and samples + maybe make `@promptbook/library` package
14
29
  */
@@ -1,5 +0,0 @@
1
- import { Wizzard } from '../wizzard/Wizzard';
2
- export { Wizzard };
3
- /**
4
- * TODO: [🧙‍♂️]
5
- */
@@ -1,5 +0,0 @@
1
- import { Wizzard } from '../wizzard/Wizzard';
2
- export { Wizzard };
3
- /**
4
- * TODO: [🧙‍♂️]
5
- */