@promptbook/remote-server 0.66.0-1 → 0.66.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. package/esm/index.es.js +47 -21
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +8 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -2
  7. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  8. package/esm/typings/src/config.d.ts +0 -7
  9. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  10. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -19
  11. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  12. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  13. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  14. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +10 -0
  16. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  17. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +7 -13
  18. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +27 -0
  19. package/esm/typings/src/llm-providers/_common/LlmToolsOptions.d.ts +7 -0
  20. package/esm/typings/src/llm-providers/_common/config.d.ts +4 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  22. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  26. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  27. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  28. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  30. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  33. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +2 -2
  34. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.test.d.ts → computeOpenAiUsage.test.d.ts} +1 -1
  35. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  36. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  37. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  38. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  39. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  40. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +2 -2
  41. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  42. package/esm/typings/src/utils/Register.d.ts +22 -0
  43. package/package.json +2 -2
  44. package/umd/index.umd.js +47 -21
  45. package/umd/index.umd.js.map +1 -1
@@ -10,11 +10,12 @@ import type { ExpectFormatCommand } from '../commands/EXPECT/ExpectFormatCommand
10
10
  import type { PipelineStringToJsonOptions } from '../conversion/pipelineStringToJson';
11
11
  import type { PrettifyOptions } from '../conversion/prettify/PrettifyOptions';
12
12
  import type { renderPipelineMermaidOptions } from '../conversion/prettify/renderPipelineMermaidOptions';
13
+ import type { AvailableModel } from '../execution/AvailableModel';
13
14
  import type { CommonExecutionToolsOptions } from '../execution/CommonExecutionToolsOptions';
14
15
  import type { EmbeddingVector } from '../execution/EmbeddingVector';
15
16
  import type { ExecutionTools } from '../execution/ExecutionTools';
16
17
  import type { LlmExecutionTools } from '../execution/LlmExecutionTools';
17
- import type { AvailableModel } from '../execution/LlmExecutionTools';
18
+ import type { LlmExecutionToolsConstructor } from '../execution/LlmExecutionToolsConstructor';
18
19
  import type { PipelineExecutor } from '../execution/PipelineExecutor';
19
20
  import type { PipelineExecutorResult } from '../execution/PipelineExecutor';
20
21
  import type { PromptResult } from '../execution/PromptResult';
@@ -31,6 +32,8 @@ import type { UserInterfaceTools } from '../execution/UserInterfaceTools';
31
32
  import type { UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
32
33
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
33
34
  import type { LlmToolsConfiguration } from '../llm-providers/_common/LlmToolsConfiguration';
35
+ import type { LlmToolsMetadata } from '../llm-providers/_common/LlmToolsMetadata';
36
+ import type { LlmToolsOptions } from '../llm-providers/_common/LlmToolsOptions';
34
37
  import type { CacheItem } from '../llm-providers/_common/utils/cache/CacheItem';
35
38
  import type { CacheLlmToolsOptions } from '../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
36
39
  import type { LlmExecutionToolsWithTotalUsage } from '../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -212,6 +215,7 @@ import type { string_SCREAMING_CASE } from '../utils/normalization/normalizeTo_S
212
215
  import type { string_snake_case } from '../utils/normalization/normalizeTo_snake_case';
213
216
  import type { really_any } from '../utils/organization/really_any';
214
217
  import type { TODO_any } from '../utils/organization/TODO_any';
218
+ import type { Registered } from '../utils/Register';
215
219
  import type { string_promptbook_version } from '../version';
216
220
  export type { PipelineCollection };
217
221
  export type { Command };
@@ -225,11 +229,12 @@ export type { ExpectFormatCommand };
225
229
  export type { PipelineStringToJsonOptions };
226
230
  export type { PrettifyOptions };
227
231
  export type { renderPipelineMermaidOptions };
232
+ export type { AvailableModel };
228
233
  export type { CommonExecutionToolsOptions };
229
234
  export type { EmbeddingVector };
230
235
  export type { ExecutionTools };
231
236
  export type { LlmExecutionTools };
232
- export type { AvailableModel };
237
+ export type { LlmExecutionToolsConstructor };
233
238
  export type { PipelineExecutor };
234
239
  export type { PipelineExecutorResult };
235
240
  export type { PromptResult };
@@ -246,6 +251,8 @@ export type { UserInterfaceTools };
246
251
  export type { UserInterfaceToolsPromptDialogOptions };
247
252
  export type { CallbackInterfaceToolsOptions };
248
253
  export type { LlmToolsConfiguration };
254
+ export type { LlmToolsMetadata };
255
+ export type { LlmToolsOptions };
249
256
  export type { CacheItem };
250
257
  export type { CacheLlmToolsOptions };
251
258
  export type { LlmExecutionToolsWithTotalUsage };
@@ -427,4 +434,5 @@ export type { string_SCREAMING_CASE };
427
434
  export type { string_snake_case };
428
435
  export type { really_any };
429
436
  export type { TODO_any };
437
+ export type { Registered };
430
438
  export type { string_promptbook_version };
@@ -6,7 +6,7 @@ import type { Command as Program } from 'commander';
6
6
  */
7
7
  export declare function initializeMakeCommand(program: Program): void;
8
8
  /**
9
- * TODO: [🥃] !!! Allow `ptbk make` without llm tools
9
+ * TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
10
10
  * TODO: Maybe remove this command - "about" command should be enough?
11
11
  * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types
12
12
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -1,4 +1,3 @@
1
- import type { LlmToolsConfiguration } from './llm-providers/_common/LlmToolsConfiguration';
2
1
  /**
3
2
  * Warning message for the generated sections and files
4
3
  *
@@ -120,12 +119,6 @@ export declare const DEFAULT_REMOTE_URL = "https://api.pavolhejny.com/";
120
119
  * @public exported from `@promptbook/core`
121
120
  */
122
121
  export declare const DEFAULT_REMOTE_URL_PATH = "/promptbook/socket.io";
123
- /**
124
- * @@@
125
- *
126
- * @public exported from `@promptbook/core`
127
- */
128
- export declare const BOILERPLATE_LLM_TOOLS_CONFIGURATION_: LlmToolsConfiguration;
129
122
  /**
130
123
  * @@@
131
124
  *
@@ -0,0 +1,20 @@
1
+ import type { ModelVariant } from '../types/ModelVariant';
2
+ import type { string_model_name } from '../types/typeAliases';
3
+ import type { string_title } from '../types/typeAliases';
4
+ /**
5
+ * Represents a model that can be used for prompt execution
6
+ */
7
+ export type AvailableModel = {
8
+ /**
9
+ * The model title
10
+ */
11
+ readonly modelTitle: string_title;
12
+ /**
13
+ * The model name available
14
+ */
15
+ readonly modelName: string_model_name;
16
+ /**
17
+ * Variant of the model
18
+ */
19
+ readonly modelVariant: ModelVariant;
20
+ };
@@ -1,10 +1,9 @@
1
1
  import type { Promisable } from 'type-fest';
2
- import type { ModelVariant } from '../types/ModelVariant';
3
2
  import type { Prompt } from '../types/Prompt';
4
3
  import type { string_markdown } from '../types/typeAliases';
5
4
  import type { string_markdown_text } from '../types/typeAliases';
6
- import type { string_model_name } from '../types/typeAliases';
7
5
  import type { string_title } from '../types/typeAliases';
6
+ import type { AvailableModel } from './AvailableModel';
8
7
  import type { ChatPromptResult } from './PromptResult';
9
8
  import type { CompletionPromptResult } from './PromptResult';
10
9
  import type { EmbeddingPromptResult } from './PromptResult';
@@ -51,23 +50,6 @@ export type LlmExecutionTools = {
51
50
  */
52
51
  callEmbeddingModel?(prompt: Prompt): Promise<EmbeddingPromptResult>;
53
52
  };
54
- /**
55
- * Represents a model that can be used for prompt execution
56
- */
57
- export type AvailableModel = {
58
- /**
59
- * The model title
60
- */
61
- readonly modelTitle: string_title;
62
- /**
63
- * The model name available
64
- */
65
- readonly modelName: string_model_name;
66
- /**
67
- * Variant of the model
68
- */
69
- readonly modelVariant: ModelVariant;
70
- };
71
53
  /**
72
54
  * TODO: Implement destroyable pattern to free resources
73
55
  * TODO: [🏳] Add `callTranslationModel`
@@ -0,0 +1,10 @@
1
+ import type { TODO_any } from '../utils/organization/TODO_any';
2
+ import type { Registered } from '../utils/Register';
3
+ import type { LlmExecutionTools } from './LlmExecutionTools';
4
+ /**
5
+ * @@@
6
+ */
7
+ export type LlmExecutionToolsConstructor = Registered & ((options: TODO_any) => LlmExecutionTools);
8
+ /**
9
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
10
+ */
@@ -1 +1 @@
1
- export {};
1
+ import '../../../_packages/core.index';
@@ -1,4 +1,4 @@
1
- export {};
1
+ import '../../../_packages/core.index';
2
2
  /**
3
3
  * TODO: [📓] Maybe test all file in samples (not just 10-simple.md)
4
4
  */
@@ -1 +1 @@
1
- export {};
1
+ import '../../../_packages/core.index';
@@ -0,0 +1,10 @@
1
+ import { Register } from '../../utils/Register';
2
+ import type { LlmToolsMetadata } from './LlmToolsMetadata';
3
+ /**
4
+ * @@@
5
+ *
6
+ * Note: `$` is used to indicate that this interacts with the global scope
7
+ * @singleton Only one instance of each register is created per build, but there can be more @@@
8
+ * @public exported from `@promptbook/core`
9
+ */
10
+ export declare const $llmToolsMetadataRegister: Register<LlmToolsMetadata>;
@@ -0,0 +1,10 @@
1
+ import type { LlmExecutionToolsConstructor } from '../../execution/LlmExecutionToolsConstructor';
2
+ import { Register } from '../../utils/Register';
3
+ /**
4
+ * @@@
5
+ *
6
+ * Note: `$` is used to indicate that this interacts with the global scope
7
+ * @singleton Only one instance of each register is created per build, but there can be more @@@
8
+ * @public exported from `@promptbook/core`
9
+ */
10
+ export declare const $llmToolsRegister: Register<LlmExecutionToolsConstructor>;
@@ -1,10 +1,12 @@
1
1
  import type { string_title } from '../../types/typeAliases';
2
- import type { TODO_object } from '../../utils/organization/TODO_object';
3
- import type { TODO_string } from '../../utils/organization/TODO_string';
2
+ import type { Registered } from '../../utils/Register';
3
+ import type { LlmToolsOptions } from './LlmToolsOptions';
4
4
  /**
5
5
  * @@@
6
+ *
7
+ * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
6
8
  */
7
- export type LlmToolsConfiguration = Array<{
9
+ export type LlmToolsConfiguration = Array<Registered & {
8
10
  /**
9
11
  * @@@
10
12
  */
@@ -12,17 +14,9 @@ export type LlmToolsConfiguration = Array<{
12
14
  /**
13
15
  * @@@
14
16
  */
15
- packageName: TODO_string;
16
- /**
17
- * @@@
18
- */
19
- className: TODO_string;
20
- /**
21
- * @@@
22
- */
23
- options: TODO_object;
17
+ options: LlmToolsOptions;
24
18
  }>;
25
19
  /**
26
20
  * TODO: [🧠][🌰] `title` is redundant BUT maybe allow each provider pass it's own title for tracking purposes
27
- * TODO: [🧠] Maybe add option for `constructorName` instead of `className`
21
+ * TODO: Maybe instead of `LlmToolsConfiguration[number]` make `LlmToolsConfigurationItem`
28
22
  */
@@ -0,0 +1,27 @@
1
+ import type { string_name } from '../../types/typeAliases';
2
+ import type { string_title } from '../../types/typeAliases';
3
+ import type { Registered } from '../../utils/Register';
4
+ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
5
+ /**
6
+ * @@@
7
+ *
8
+ * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
9
+ */
10
+ export type LlmToolsMetadata = Registered & {
11
+ /**
12
+ * @@@
13
+ */
14
+ title: string_title;
15
+ /**
16
+ * @@@
17
+ */
18
+ getBoilerplateConfiguration(): LlmToolsConfiguration[number];
19
+ /**
20
+ * @@@
21
+ */
22
+ createConfigurationFromEnv(env: Record<string_name, string>): LlmToolsConfiguration[number] | null;
23
+ };
24
+ /**
25
+ * TODO: Add configuration schema and maybe some documentation link
26
+ * TODO: Maybe constrain LlmToolsConfiguration[number] by generic to ensure that `createConfigurationFromEnv` and `getBoilerplateConfiguration` always create same `packageName` and `className`
27
+ */
@@ -0,0 +1,7 @@
1
+ import type { TODO_object } from '../../utils/organization/TODO_object';
2
+ /**
3
+ * @@@
4
+ *
5
+ * @@@ `LlmToolsMetadata` vs `LlmToolsConfiguration` vs `LlmToolsOptions` (vs `Registered`)
6
+ */
7
+ export type LlmToolsOptions = TODO_object;
@@ -1,6 +1,10 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
2
  import type { TODO_any } from '../../utils/organization/TODO_any';
3
3
  /**
4
+ * @@@
5
+ *
6
+ * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
7
+ *
4
8
  * @private internal type for `createLlmToolsFromConfiguration`
5
9
  */
6
10
  export declare const EXECUTION_TOOLS_CLASSES: Record<`create${string}`, (options: TODO_any) => LlmExecutionTools>;
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { Prompt } from '../../types/Prompt';
@@ -47,7 +47,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
47
47
  * TODO: [🍆] JSON mode
48
48
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
49
49
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
50
- * TODO: Maybe make custom OpenaiError
50
+ * TODO: Maybe make custom OpenAiError
51
51
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
52
52
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
53
53
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available Anthropic Claude models with pricing
@@ -14,5 +14,5 @@ import type { Prompt } from '../../types/Prompt';
14
14
  export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
15
15
  resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
16
16
  /**
17
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
18
18
  */
@@ -1,4 +1,4 @@
1
1
  export {};
2
2
  /**
3
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
4
4
  */
@@ -6,9 +6,14 @@ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExec
6
6
  *
7
7
  * @public exported from `@promptbook/anthropic-claude`
8
8
  */
9
- export declare function createAnthropicClaudeExecutionTools(options: AnthropicClaudeExecutionToolsOptions): AnthropicClaudeExecutionTools | RemoteLlmExecutionTools;
9
+ export declare const createAnthropicClaudeExecutionTools: ((options: AnthropicClaudeExecutionToolsOptions) => AnthropicClaudeExecutionTools | RemoteLlmExecutionTools) & {
10
+ packageName: string;
11
+ className: string;
12
+ };
10
13
  /**
11
14
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
12
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
15
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
13
16
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
17
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
18
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
14
19
  */
@@ -0,0 +1,8 @@
1
+ /**
2
+ * @@@ registration1 of default configuration for Anthropic Claude
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare const _AnthropicClaudeMetadataRegistration: void;
@@ -0,0 +1,11 @@
1
+ /**
2
+ * @@@ registration2
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/anthropic-claude`
7
+ */
8
+ export declare const _AnthropicClaudeRegistration: void;
9
+ /**
10
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
11
+ */
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -45,7 +45,7 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
45
45
  }
46
46
  /**
47
47
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
48
- * TODO: Maybe make custom AzureOpenaiError
48
+ * TODO: Maybe make custom AzureOpenAiError
49
49
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
50
50
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
51
51
  */
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
5
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
4
  import type { ChatPromptResult } from '../../execution/PromptResult';
5
5
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -9,7 +9,7 @@ import type { string_markdown_text } from '../../types/typeAliases';
9
9
  import type { string_title } from '../../types/typeAliases';
10
10
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
11
11
  /**
12
- * Execution Tools for calling OpenAI API.
12
+ * Execution Tools for calling OpenAI API
13
13
  *
14
14
  * @public exported from `@promptbook/openai`
15
15
  */
@@ -63,7 +63,7 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
63
63
  /**
64
64
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
65
65
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
66
- * TODO: Maybe make custom OpenaiError
66
+ * TODO: Maybe make custom OpenAiError
67
67
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
68
68
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
69
69
  */
@@ -11,8 +11,8 @@ import type { Prompt } from '../../types/Prompt';
11
11
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
12
12
  * @private internal utility of `OpenAiExecutionTools`
13
13
  */
14
- export declare function computeOpenaiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
14
+ export declare function computeOpenAiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
15
15
  resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
16
16
  /**
17
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
18
18
  */
@@ -1,4 +1,4 @@
1
1
  export {};
2
2
  /**
3
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
4
4
  */
@@ -0,0 +1,15 @@
1
+ import { OpenAiExecutionTools } from './OpenAiExecutionTools';
2
+ import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
3
+ /**
4
+ * Execution Tools for calling OpenAI API
5
+ *
6
+ * @public exported from `@promptbook/openai`
7
+ */
8
+ export declare const createOpenAiExecutionTools: ((options: OpenAiExecutionToolsOptions) => OpenAiExecutionTools) & {
9
+ packageName: string;
10
+ className: string;
11
+ };
12
+ /**
13
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
14
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15
+ */
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { number_usd } from '../../types/typeAliases';
3
3
  /**
4
4
  * List of available OpenAI models with pricing
@@ -0,0 +1,8 @@
1
+ /**
2
+ * @@@ registration1 of default configuration for Open AI
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare const _OpenAiMetadataRegistration: void;
@@ -0,0 +1,11 @@
1
+ /**
2
+ * @@@ registration2
3
+ *
4
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
5
+ *
6
+ * @public exported from `@promptbook/openai`
7
+ */
8
+ export declare const _OpenAiRegistration: void;
9
+ /**
10
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
11
+ */
@@ -1,4 +1,4 @@
1
- import type { AvailableModel } from '../../execution/LlmExecutionTools';
1
+ import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { CompletionPromptResult } from '../../execution/PromptResult';
@@ -1,5 +1,5 @@
1
+ import type { AvailableModel } from '../../../execution/AvailableModel';
1
2
  import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
2
- import type { AvailableModel } from '../../../execution/LlmExecutionTools';
3
3
  import type { client_id } from '../../../types/typeAliases';
4
4
  import type { string_base_url } from '../../../types/typeAliases';
5
5
  import type { string_uri } from '../../../types/typeAliases';
@@ -53,4 +53,4 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
53
53
  });
54
54
  /**
55
55
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
56
- */
56
+ */
@@ -1 +1 @@
1
- export {};
1
+ import '../../src/_packages/core.index';
@@ -0,0 +1,22 @@
1
+ import type { TODO_string } from './organization/TODO_string';
2
+ export type Registered = {
3
+ /**
4
+ * @@@
5
+ */
6
+ packageName: TODO_string;
7
+ /**
8
+ * @@@
9
+ */
10
+ className: TODO_string;
11
+ };
12
+ /**
13
+ * Register is @@@
14
+ *
15
+ * @private internal utility, exported are only singleton instances of this class
16
+ */
17
+ export declare class Register<TRegistered extends Registered> {
18
+ private readonly storage;
19
+ constructor(storage: Array<TRegistered>);
20
+ list(): Array<TRegistered>;
21
+ register(registered: TRegistered): void;
22
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.66.0-1",
3
+ "version": "0.66.0-5",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.66.0-1"
50
+ "@promptbook/core": "0.66.0-5"
51
51
  },
52
52
  "dependencies": {
53
53
  "@anthropic-ai/sdk": "0.26.1",