@promptbook/openai 0.66.0-0 → 0.66.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/esm/index.es.js +604 -536
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +6 -2
  7. package/esm/typings/src/config.d.ts +15 -0
  8. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  9. package/esm/typings/src/execution/LlmExecutionTools.d.ts +12 -24
  10. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  11. package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts +12 -0
  12. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +2 -10
  14. package/esm/typings/src/llm-providers/_common/config.d.ts +5 -6
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  20. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  22. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  23. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  24. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  28. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +6 -2
  29. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.test.d.ts +4 -0
  30. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  31. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  33. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  34. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +5 -2
  36. package/esm/typings/src/utils/Register.d.ts +22 -0
  37. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +3 -0
  38. package/package.json +2 -2
  39. package/umd/index.umd.js +605 -535
  40. package/umd/index.umd.js.map +1 -1
  41. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +0 -1
@@ -0,0 +1,11 @@
1
+ /**
2
+ * @@@ registration2
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/anthropic-claude`
7
+ */
8
+ export declare const _AnthropicClaudeRegistration: void;
9
+ /**
10
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
11
+ */
@@ -0,0 +1,4 @@
1
+ /**
2
+ * @public exported from `@promptbook/anthropic-claude`
3
+ */
4
+ export declare const _: undefined;
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -45,7 +45,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
45
45
  }
46
46
  /**
47
47
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
48
- * TODO: Maybe make custom AzureOpenaiError
48
+ * TODO: Maybe make custom AzureOpenAiError
49
49
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
50
50
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
51
51
  */
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
5
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
5
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -9,7 +9,7 @@ import type { string_markdown_text } from '../../types/typeAliases';
9
9
  import type { string_title } from '../../types/typeAliases';
10
10
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
11
11
  /**
12
- * Execution Tools for calling OpenAI API.
12
+ * Execution Tools for calling OpenAI API
13
13
  *
14
14
  * @public exported from `@promptbook/openai`
15
15
  */
@@ -63,7 +63,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
63
63
  /**
64
64
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
65
65
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
66
- * TODO: Maybe make custom OpenaiError
66
+ * TODO: Maybe make custom OpenAiError
67
67
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
68
68
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
69
69
  */
@@ -1,4 +1,5 @@
1
1
  import type OpenAI from 'openai';
2
+ import type { PartialDeep } from 'type-fest';
2
3
  import type { PromptResultUsage } from '../../execution/PromptResultUsage';
3
4
  import type { Prompt } from '../../types/Prompt';
4
5
  /**
@@ -10,5 +11,8 @@ import type { Prompt } from '../../types/Prompt';
10
11
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
11
12
  * @private internal utility of `OpenAiExecutionTools`
12
13
  */
13
- export declare function computeOpenaiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
14
- resultContent: string, rawResponse: Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>): PromptResultUsage;
14
+ export declare function computeOpenAiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
15
+ resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
16
+ /**
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
18
+ */
@@ -0,0 +1,4 @@
1
+ export {};
2
+ /**
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
4
+ */
@@ -0,0 +1,15 @@
1
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
2
+ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
3
+ /**
4
+ * Execution Tools for calling OpenAI API
5
+ *
6
+ * @public exported from `@promptbook/openai`
7
+ */
8
+ export declare const createOpenAiExecutionTools: ((options: OpenAiExecutionToolsOptions) => OpenAiExecutionTools) & {
9
+ packageName: string;
10
+ className: string;
11
+ };
12
+ /**
13
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
14
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15
+ */
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available OpenAI models with pricing
@@ -0,0 +1,8 @@
1
+ /**
2
+ * @@@ registration1 of default configuration for Open AI
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare const _OpenAiConfigurationRegistration: void;
@@ -0,0 +1,11 @@
1
+ /**
2
+ * @@@ registration2
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/openai`
7
+ */
8
+ export declare const _OpenAiRegistration: void;
9
+ /**
10
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
11
+ */
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../../execution/LlmExecutionTools';
3
3
  import type { client_id } from '../../../types/typeAliases';
4
4
  import type { string_base_url } from '../../../types/typeAliases';
5
5
  import type { string_uri } from '../../../types/typeAliases';
@@ -25,7 +25,7 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
25
25
  /**
26
26
  * If set, only these models will be listed as available
27
27
  *
28
- * TODO: [🧠] !!!! Figure out better solution
28
+ * TODO: [🧠] !!!!!! Figure out better solution
29
29
  */
30
30
  readonly models?: Array<AvailableModel>;
31
31
  /**
@@ -51,3 +51,6 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
51
51
  */
52
52
  readonly clientId: client_id;
53
53
  });
54
+ /**
55
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
56
+ */
@@ -0,0 +1,22 @@
1
+ import type { TODO_string } from './organization/TODO_string';
2
+ export type Registered = {
3
+ /**
4
+ * @@@
5
+ */
6
+ packageName: TODO_string;
7
+ /**
8
+ * @@@
9
+ */
10
+ className: TODO_string;
11
+ };
12
+ /**
13
+ * Register is @@@
14
+ *
15
+ * @private internal utility, exported are only signleton instances of this class
16
+ */
17
+ export declare class Register<TRegistered extends Registered> {
18
+ private readonly storage;
19
+ constructor(storage: Array<TRegistered>);
20
+ list(): Array<TRegistered>;
21
+ register(registered: TRegistered): void;
22
+ }
@@ -7,3 +7,6 @@ import type { really_any } from '../organization/really_any';
7
7
  * @public exported from `@promptbook/utils`
8
8
  */
9
9
  export declare function $getGlobalScope(): really_any;
10
+ /***
11
+ * TODO: !!!!! Make private and promptbook registry from this
12
+ */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/openai",
3
- "version": "0.66.0-0",
3
+ "version": "0.66.0-4",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/openai.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.66.0-0"
50
+ "@promptbook/core": "0.66.0-4"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",