@promptbook/openai 0.65.0-2 → 0.65.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23) hide show
  1. package/esm/index.es.js +1 -1
  2. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  5. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  6. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  7. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  8. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  10. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  11. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  13. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  18. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  19. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  20. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  21. package/package.json +2 -2
  22. package/umd/index.umd.js +1 -1
  23. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
package/esm/index.es.js CHANGED
@@ -6,7 +6,7 @@ import spaceTrim$1, { spaceTrim } from 'spacetrim';
6
6
  /**
7
7
  * The version of the Promptbook library
8
8
  */
9
- var PROMPTBOOK_VERSION = '0.65.0-1';
9
+ var PROMPTBOOK_VERSION = '0.65.0-3';
10
10
  // TODO: !!!! List here all the versions and annotate + put into script
11
11
 
12
12
  /*! *****************************************************************************
@@ -2,7 +2,13 @@ import { PROMPTBOOK_VERSION } from '../version';
2
2
  import { ANTHROPIC_CLAUDE_MODELS } from '../llm-providers/anthropic-claude/anthropic-claude-models';
3
3
  import { AnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionTools';
4
4
  import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
5
+ import type { AnthropicClaudeExecutionToolsDirectOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
6
+ import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
7
+ import { createAnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools';
5
8
  export { PROMPTBOOK_VERSION };
6
9
  export { ANTHROPIC_CLAUDE_MODELS };
7
10
  export { AnthropicClaudeExecutionTools };
8
11
  export type { AnthropicClaudeExecutionToolsOptions };
12
+ export type { AnthropicClaudeExecutionToolsDirectOptions };
13
+ export type { AnthropicClaudeExecutionToolsProxiedOptions };
14
+ export { createAnthropicClaudeExecutionTools };
@@ -45,6 +45,7 @@ import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackIn
45
45
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
46
46
  import { prepareKnowledgePieces } from '../knowledge/prepare-knowledge/_common/prepareKnowledgePieces';
47
47
  import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
48
+ import { LLM_CONFIGURATION_BOILERPLATES } from '../llm-providers/_common/config';
48
49
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/createLlmToolsFromConfiguration';
49
50
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
50
51
  import { countTotalUsage } from '../llm-providers/_common/utils/count-total-usage/countTotalUsage';
@@ -109,6 +110,7 @@ export { CallbackInterfaceTools };
109
110
  export type { CallbackInterfaceToolsOptions };
110
111
  export { prepareKnowledgePieces };
111
112
  export { prepareKnowledgeFromMarkdown };
113
+ export { LLM_CONFIGURATION_BOILERPLATES };
112
114
  export { createLlmToolsFromConfiguration };
113
115
  export { cacheLlmTools };
114
116
  export { countTotalUsage };
@@ -1,12 +1,10 @@
1
1
  import { PROMPTBOOK_VERSION } from '../version';
2
2
  import { createCollectionFromDirectory } from '../collection/constructors/createCollectionFromDirectory';
3
- import { LLM_CONFIGURATION_BOILERPLATES } from '../llm-providers/_common/config';
4
3
  import { createLlmToolsFromConfigurationFromEnv } from '../llm-providers/_common/createLlmToolsFromConfigurationFromEnv';
5
4
  import { createLlmToolsFromEnv } from '../llm-providers/_common/createLlmToolsFromEnv';
6
5
  import { FilesStorage } from '../storage/files-storage/FilesStorage';
7
6
  export { PROMPTBOOK_VERSION };
8
7
  export { createCollectionFromDirectory };
9
- export { LLM_CONFIGURATION_BOILERPLATES };
10
8
  export { createLlmToolsFromConfigurationFromEnv };
11
9
  export { createLlmToolsFromEnv };
12
10
  export { FilesStorage };
@@ -1,8 +1,8 @@
1
1
  import { PROMPTBOOK_VERSION } from '../version';
2
+ import type { RemoteLlmExecutionToolsOptions } from '../llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions';
2
3
  import type { RemoteServerOptions } from '../llm-providers/remote/interfaces/RemoteServerOptions';
3
4
  import { RemoteLlmExecutionTools } from '../llm-providers/remote/RemoteLlmExecutionTools';
4
- import type { RemoteLlmExecutionToolsOptions } from '../llm-providers/remote/RemoteLlmExecutionToolsOptions';
5
5
  export { PROMPTBOOK_VERSION };
6
+ export type { RemoteLlmExecutionToolsOptions };
6
7
  export type { RemoteServerOptions };
7
8
  export { RemoteLlmExecutionTools };
8
- export type { RemoteLlmExecutionToolsOptions };
@@ -35,6 +35,8 @@ import type { CacheItem } from '../llm-providers/_common/utils/cache/CacheItem';
35
35
  import type { CacheLlmToolsOptions } from '../llm-providers/_common/utils/cache/CacheLlmToolsOptions';
36
36
  import type { LlmExecutionToolsWithTotalUsage } from '../llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
37
37
  import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
38
+ import type { AnthropicClaudeExecutionToolsDirectOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
39
+ import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
38
40
  import type { AzureOpenAiExecutionToolsOptions } from '../llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions';
39
41
  import type { LangtailExecutionToolsOptions } from '../llm-providers/langtail/LangtailExecutionToolsOptions';
40
42
  import type { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
@@ -42,9 +44,13 @@ import type { OpenAiExecutionToolsOptions } from '../llm-providers/openai/OpenAi
42
44
  import type { Promptbook_Server_Error } from '../llm-providers/remote/interfaces/Promptbook_Server_Error';
43
45
  import type { Promptbook_Server_Progress } from '../llm-providers/remote/interfaces/Promptbook_Server_Progress';
44
46
  import type { Promptbook_Server_Request } from '../llm-providers/remote/interfaces/Promptbook_Server_Request';
47
+ import type { Promptbook_Server_CollectionRequest } from '../llm-providers/remote/interfaces/Promptbook_Server_Request';
48
+ import type { Promptbook_Server_AnonymousRequest } from '../llm-providers/remote/interfaces/Promptbook_Server_Request';
45
49
  import type { Promptbook_Server_Response } from '../llm-providers/remote/interfaces/Promptbook_Server_Response';
50
+ import type { RemoteLlmExecutionToolsOptions } from '../llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions';
46
51
  import type { RemoteServerOptions } from '../llm-providers/remote/interfaces/RemoteServerOptions';
47
- import type { RemoteLlmExecutionToolsOptions } from '../llm-providers/remote/RemoteLlmExecutionToolsOptions';
52
+ import type { AnonymousRemoteServerOptions } from '../llm-providers/remote/interfaces/RemoteServerOptions';
53
+ import type { CollectionRemoteServerOptions } from '../llm-providers/remote/interfaces/RemoteServerOptions';
48
54
  import type { PrepareOptions } from '../prepare/PrepareOptions';
49
55
  import type { JavascriptExecutionToolsOptions } from '../scripting/javascript/JavascriptExecutionToolsOptions';
50
56
  import type { PostprocessingFunction } from '../scripting/javascript/JavascriptExecutionToolsOptions';
@@ -126,6 +132,7 @@ import type { string_css_property } from '../types/typeAliases';
126
132
  import type { string_css_value } from '../types/typeAliases';
127
133
  import type { string_css_selector } from '../types/typeAliases';
128
134
  import type { string_url } from '../types/typeAliases';
135
+ import type { string_base_url } from '../types/typeAliases';
129
136
  import type { string_pipeline_url } from '../types/typeAliases';
130
137
  import type { string_pipeline_url_with_hashtemplate } from '../types/typeAliases';
131
138
  import type { string_data_url } from '../types/typeAliases';
@@ -243,6 +250,8 @@ export type { CacheItem };
243
250
  export type { CacheLlmToolsOptions };
244
251
  export type { LlmExecutionToolsWithTotalUsage };
245
252
  export type { AnthropicClaudeExecutionToolsOptions };
253
+ export type { AnthropicClaudeExecutionToolsDirectOptions };
254
+ export type { AnthropicClaudeExecutionToolsProxiedOptions };
246
255
  export type { AzureOpenAiExecutionToolsOptions };
247
256
  export type { LangtailExecutionToolsOptions };
248
257
  export type { MultipleLlmExecutionTools };
@@ -250,9 +259,13 @@ export type { OpenAiExecutionToolsOptions };
250
259
  export type { Promptbook_Server_Error };
251
260
  export type { Promptbook_Server_Progress };
252
261
  export type { Promptbook_Server_Request };
262
+ export type { Promptbook_Server_CollectionRequest };
263
+ export type { Promptbook_Server_AnonymousRequest };
253
264
  export type { Promptbook_Server_Response };
254
- export type { RemoteServerOptions };
255
265
  export type { RemoteLlmExecutionToolsOptions };
266
+ export type { RemoteServerOptions };
267
+ export type { AnonymousRemoteServerOptions };
268
+ export type { CollectionRemoteServerOptions };
256
269
  export type { PrepareOptions };
257
270
  export type { JavascriptExecutionToolsOptions };
258
271
  export type { PostprocessingFunction };
@@ -334,6 +347,7 @@ export type { string_css_property };
334
347
  export type { string_css_value };
335
348
  export type { string_css_selector };
336
349
  export type { string_url };
350
+ export type { string_base_url };
337
351
  export type { string_pipeline_url };
338
352
  export type { string_pipeline_url_with_hashtemplate };
339
353
  export type { string_data_url };
@@ -1,14 +1,14 @@
1
- import type { TODO_any } from '../../utils/organization/TODO_any';
2
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import type { TODO_any } from '../../utils/organization/TODO_any';
3
3
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
4
  /**
5
- * @public exported from `@promptbook/node`
5
+ * @public exported from `@promptbook/core`
6
6
  */
7
7
  export declare const LLM_CONFIGURATION_BOILERPLATES: LlmToolsConfiguration;
8
8
  /**
9
9
  * @private internal type for `createLlmToolsFromConfiguration`
10
10
  */
11
- export declare const EXECUTION_TOOLS_CLASSES: Record<`get${string}`, (options: TODO_any) => LlmExecutionTools>;
11
+ export declare const EXECUTION_TOOLS_CLASSES: Record<`create${string}`, (options: TODO_any) => LlmExecutionTools>;
12
12
  /**
13
13
  * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
14
14
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
@@ -5,11 +5,12 @@ import type { Prompt } from '../../types/Prompt';
5
5
  import type { string_markdown } from '../../types/typeAliases';
6
6
  import type { string_markdown_text } from '../../types/typeAliases';
7
7
  import type { string_title } from '../../types/typeAliases';
8
- import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
8
+ import type { AnthropicClaudeExecutionToolsDirectOptions } from './AnthropicClaudeExecutionToolsOptions';
9
9
  /**
10
10
  * Execution Tools for calling Anthropic Claude API.
11
11
  *
12
12
  * @public exported from `@promptbook/anthropic-claude`
13
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
13
14
  */
14
15
  export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools {
15
16
  private readonly options;
@@ -22,7 +23,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
22
23
  *
23
24
  * @param options which are relevant are directly passed to the Anthropic Claude client
24
25
  */
25
- constructor(options?: AnthropicClaudeExecutionToolsOptions);
26
+ constructor(options?: AnthropicClaudeExecutionToolsDirectOptions);
26
27
  get title(): string_title & string_markdown_text;
27
28
  get description(): string_markdown;
28
29
  /**
@@ -48,6 +49,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
48
49
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
49
50
  * TODO: Maybe make custom OpenaiError
50
51
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
51
- * TODO: [🍜] Auto use anonymous server in browser
52
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
52
53
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
54
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
53
55
  */
@@ -1,12 +1,33 @@
1
1
  import type { ClientOptions } from '@anthropic-ai/sdk';
2
2
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
3
+ import type { RemoteLlmExecutionToolsOptions } from '../remote/interfaces/RemoteLlmExecutionToolsOptions';
3
4
  /**
4
5
  * Options for `AnthropicClaudeExecutionTools`
5
6
  *
6
7
  * This extends Anthropic's `ClientOptions` which are directly passed to the Anthropic client.
7
8
  * @public exported from `@promptbook/anthropic-claude`
8
9
  */
9
- export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
10
+ export type AnthropicClaudeExecutionToolsOptions = AnthropicClaudeExecutionToolsDirectOptions | AnthropicClaudeExecutionToolsProxiedOptions;
10
11
  /**
11
- * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
12
+ * Options for directly used `AnthropicClaudeExecutionTools`
13
+ *
14
+ * This extends Anthropic's `ClientOptions` which are directly passed to the Anthropic client.
15
+ * @public exported from `@promptbook/anthropic-claude`
16
+ */
17
+ export type AnthropicClaudeExecutionToolsDirectOptions = CommonExecutionToolsOptions & ClientOptions & {
18
+ isProxied?: false;
19
+ };
20
+ /**
21
+ * Options for proxied `AnthropicClaudeExecutionTools`
22
+ *
23
+ * This extends Anthropic's `ClientOptions` which are directly passed to the Anthropic client.
24
+ * @public exported from `@promptbook/anthropic-claude`
25
+ */
26
+ export type AnthropicClaudeExecutionToolsProxiedOptions = CommonExecutionToolsOptions & ClientOptions & {
27
+ isProxied: true;
28
+ } & Pick<RemoteLlmExecutionToolsOptions, 'remoteUrl' | 'path'>;
29
+ /**
30
+ * TODO: [🍜] Default remote remoteUrl and path for anonymous server
31
+ * TODO: [🍜] !!!!!! Auto add WebGPT / Promptbook.studio anonymous server in browser
32
+ * TODO: [🧠][🤚] Detecting `user`
12
33
  */
@@ -0,0 +1,13 @@
1
+ import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';
2
+ import { AnthropicClaudeExecutionTools } from './AnthropicClaudeExecutionTools';
3
+ import type { AnthropicClaudeExecutionToolsOptions } from './AnthropicClaudeExecutionToolsOptions';
4
+ /**
5
+ * Execution Tools for calling Anthropic Claude API.
6
+ *
7
+ * @public exported from `@promptbook/anthropic-claude`
8
+ */
9
+ export declare function createAnthropicClaudeExecutionTools(options: AnthropicClaudeExecutionToolsOptions): AnthropicClaudeExecutionTools | RemoteLlmExecutionTools;
10
+ /**
11
+ * TODO: !!!!!! Make this with all LLM providers
12
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
13
+ */
@@ -1,6 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
- * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
4
+ * TODO: [🍜] !!!!!! Playground with WebGPT / Promptbook.studio anonymous server
5
5
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
6
6
  */
@@ -9,7 +9,7 @@ import type { EmbeddingPrompt } from '../../types/Prompt';
9
9
  import type { string_markdown } from '../../types/typeAliases';
10
10
  import type { string_markdown_text } from '../../types/typeAliases';
11
11
  import type { string_title } from '../../types/typeAliases';
12
- import type { RemoteLlmExecutionToolsOptions } from './RemoteLlmExecutionToolsOptions';
12
+ import type { RemoteLlmExecutionToolsOptions } from './interfaces/RemoteLlmExecutionToolsOptions';
13
13
  /**
14
14
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
15
15
  *
@@ -50,8 +50,9 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
50
50
  listModels(): Promise<Array<AvailableModel>>;
51
51
  }
52
52
  /**
53
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
53
54
  * TODO: [🍓] Allow to list compatible models with each variant
54
55
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
55
- * TODO: [🍜] Add anonymous option
56
56
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
57
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
57
58
  */
@@ -3,9 +3,9 @@
3
3
  *
4
4
  * This is sent from server to client when error occurs and stops the process
5
5
  */
6
- export interface Promptbook_Server_Error {
6
+ export type Promptbook_Server_Error = {
7
7
  /**
8
8
  * The error message which caused the error
9
9
  */
10
10
  readonly errorMessage: string;
11
- }
11
+ };
@@ -4,9 +4,9 @@ import type { TaskProgress } from '../../../types/TaskProgress';
4
4
  *
5
5
  * This is sent from server to client arbitrarily and may be sent multiple times
6
6
  */
7
- export interface Promptbook_Server_Progress {
7
+ export type Promptbook_Server_Progress = {
8
8
  /**
9
9
  * The progress of text generation
10
10
  */
11
11
  readonly taskProgress: TaskProgress;
12
- }
12
+ };
@@ -1,11 +1,13 @@
1
1
  import type { Prompt } from '../../../types/Prompt';
2
2
  import type { client_id } from '../../../types/typeAliases';
3
+ import type { LlmToolsConfiguration } from '../../_common/LlmToolsConfiguration';
3
4
  /**
4
5
  * Socket.io progress for remote text generation
5
6
  *
6
7
  * This is a request from client to server
7
8
  */
8
- export interface Promptbook_Server_Request {
9
+ export type Promptbook_Server_Request = Promptbook_Server_CollectionRequest | Promptbook_Server_AnonymousRequest;
10
+ export type Promptbook_Server_CollectionRequest = {
9
11
  /**
10
12
  * Client responsible for the requests
11
13
  */
@@ -14,4 +16,14 @@ export interface Promptbook_Server_Request {
14
16
  * The Prompt to execute
15
17
  */
16
18
  readonly prompt: Prompt;
17
- }
19
+ };
20
+ export type Promptbook_Server_AnonymousRequest = {
21
+ /**
22
+ * Configuration for the LLM tools
23
+ */
24
+ readonly llmToolsConfiguration: LlmToolsConfiguration;
25
+ /**
26
+ * The Prompt to execute
27
+ */
28
+ readonly prompt: Prompt;
29
+ };
@@ -0,0 +1,49 @@
1
+ import type { CommonExecutionToolsOptions } from '../../../execution/CommonExecutionToolsOptions';
2
+ import type { client_id } from '../../../types/typeAliases';
3
+ import type { string_base_url } from '../../../types/typeAliases';
4
+ import type { string_uri } from '../../../types/typeAliases';
5
+ import type { LlmToolsConfiguration } from '../../_common/LlmToolsConfiguration';
6
+ /**
7
+ * Options for `RemoteLlmExecutionTools`
8
+ *
9
+ * @public exported from `@promptbook/remote-client`
10
+ */
11
+ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
12
+ /**
13
+ * URL of the remote PROMPTBOOK server
14
+ * On this server will be connected to the socket.io server
15
+ */
16
+ readonly remoteUrl: string_base_url;
17
+ /**
18
+ * Path for the Socket.io server to listen
19
+ *
20
+ * @default '/socket.io'
21
+ * @example '/promptbook/socket.io'
22
+ */
23
+ readonly path: string_uri;
24
+ /**
25
+ * Mode of the server to connect to
26
+ */
27
+ isAnonymous: boolean;
28
+ } & ({
29
+ /**
30
+ * Use anonymous server with anonymous mode
31
+ */
32
+ isAnonymous: true;
33
+ /**
34
+ * Configuration for the LLM tools
35
+ */
36
+ readonly llmToolsConfiguration: LlmToolsConfiguration;
37
+ } | {
38
+ /**
39
+ * Use anonymous server with client identification and fixed collection
40
+ */
41
+ isAnonymous: false;
42
+ /**
43
+ * Your client ID
44
+ */
45
+ readonly clientId: client_id;
46
+ });
47
+ /**
48
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
49
+ */
@@ -6,6 +6,14 @@ import type { string_uri } from '../../../types/typeAliases';
6
6
  /**
7
7
  * @@@
8
8
  *
9
+ * There are two modes of remote server:
10
+ *
11
+ * 1) **Collection mode** Server will receive `collection` and execute prompts only from this collection
12
+ * 2) **Anonymous mode** Server will receive full `LlmToolsConfiguration` (with api keys) and just acts as a proxy
13
+ * In anonymous mode, `collection` will be ignored and any prompt will be executed
14
+ *
15
+ * You can enable both modes at the same time.
16
+ *
9
17
  * @public exported from `@promptbook/remote-client`
10
18
  * @public exported from `@promptbook/remote-server`
11
19
  */
@@ -21,10 +29,22 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
21
29
  * @example '/promptbook/socket.io'
22
30
  */
23
31
  readonly path: string_uri;
32
+ } & (AnonymousRemoteServerOptions | CollectionRemoteServerOptions | (AnonymousRemoteServerOptions & CollectionRemoteServerOptions));
33
+ export type AnonymousRemoteServerOptions = {
34
+ /**
35
+ * Enable anonymous mode
36
+ */
37
+ readonly isAnonymousModeAllowed: true;
38
+ };
39
+ export type CollectionRemoteServerOptions = {
40
+ /**
41
+ * Enable collection mode
42
+ */
43
+ readonly isCollectionModeAllowed: true;
24
44
  /**
25
45
  * Promptbook collection to use
26
46
  *
27
- * This is used to check validity of the prompt to prevent DDoS
47
+ * This is used to check validity of the prompt to prevent misuse
28
48
  */
29
49
  readonly collection: PipelineCollection;
30
50
  /**
@@ -33,5 +53,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
33
53
  createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
34
54
  };
35
55
  /**
36
- * TODO: [🍜] Add anonymous option
56
+ * TODO: Constrain anonymous mode for specific models / providers
57
+ * TODO: [🧠][🤚] Remove `createLlmExecutionTools`, pass just `llmExecutionTools`
37
58
  */
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env ts-node
2
+ export {};
@@ -11,11 +11,12 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
11
11
  */
12
12
  export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
13
13
  /**
14
- * TODO: [🍜] Add anonymous option
14
+ * TODO: [🍜] !!!!!! Add anonymous option
15
15
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
16
16
  * TODO: Handle progress - support streaming
17
17
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
18
18
  * TODO: [🗯] Timeout on chat to free up resources
19
19
  * TODO: [🃏] Pass here some security token to prevent malicious usage and/or DDoS
20
20
  * TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
21
+ * TODO: Constrain anonymous mode for specific models / providers
21
22
  */
@@ -286,6 +286,12 @@ export type string_css_selector = string;
286
286
  * For example `"https://collboard.com/9SeSQTupmQHwuSrLi"`
287
287
  */
288
288
  export type string_url = string;
289
+ /**
290
+ * Semantic helper
291
+ *
292
+ * For example `"https://collboard.com"`
293
+ */
294
+ export type string_base_url = string;
289
295
  /**
290
296
  * Semantic helper
291
297
  *
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/openai",
3
- "version": "0.65.0-2",
3
+ "version": "0.65.0-4",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -47,7 +47,7 @@
47
47
  "module": "./esm/index.es.js",
48
48
  "typings": "./esm/typings/src/_packages/openai.index.d.ts",
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.65.0-2"
50
+ "@promptbook/core": "0.65.0-4"
51
51
  },
52
52
  "dependencies": {
53
53
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -14,7 +14,7 @@
14
14
  /**
15
15
  * The version of the Promptbook library
16
16
  */
17
- var PROMPTBOOK_VERSION = '0.65.0-1';
17
+ var PROMPTBOOK_VERSION = '0.65.0-3';
18
18
  // TODO: !!!! List here all the versions and annotate + put into script
19
19
 
20
20
  /*! *****************************************************************************
@@ -1,26 +0,0 @@
1
- import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
- import type { client_id } from '../../types/typeAliases';
3
- import type { string_uri } from '../../types/typeAliases';
4
- /**
5
- * Options for `RemoteLlmExecutionTools`
6
- *
7
- * @public exported from `@promptbook/remote-client`
8
- */
9
- export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
10
- /**
11
- * URL of the remote PROMPTBOOK server
12
- * On this server will be connected to the socket.io server
13
- */
14
- readonly remoteUrl: URL;
15
- /**
16
- * Path for the Socket.io server to listen
17
- *
18
- * @default '/socket.io'
19
- * @example '/promptbook/socket.io'
20
- */
21
- readonly path: string_uri;
22
- /**
23
- * Your client ID
24
- */
25
- readonly clientId: client_id;
26
- };