@promptbook/legacy-documents 0.71.0-17 → 0.72.0-11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/esm/index.es.js +230 -203
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +8 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +8 -2
  7. package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
  8. package/esm/typings/src/dialogs/simple-prompt/SimplePromptInterfaceTools.d.ts +1 -1
  9. package/esm/typings/src/execution/translation/automatic-translate/automatic-translators/LindatAutomaticTranslator.d.ts +1 -1
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +0 -56
  11. package/esm/typings/src/execution/utils/usage-constants.d.ts +127 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
  14. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionToolsOptions.d.ts +2 -1
  16. package/esm/typings/src/llm-providers/langtail/LangtailExecutionTools.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  19. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +37 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +14 -0
  21. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +11 -2
  22. package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +2 -2
  23. package/esm/typings/src/llm-providers/openai/createOpenAiAssistantExecutionTools.d.ts +15 -0
  24. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +9 -0
  25. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +9 -0
  26. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -3
  27. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +15 -6
  28. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts +6 -12
  30. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Response.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +9 -14
  32. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +22 -8
  33. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -1
  34. package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +1 -1
  35. package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts +1 -1
  36. package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts +1 -1
  37. package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts +1 -1
  38. package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts +1 -1
  39. package/esm/typings/src/types/Prompt.d.ts +1 -0
  40. package/esm/typings/src/types/typeAliases.d.ts +11 -8
  41. package/package.json +2 -2
  42. package/umd/index.umd.js +230 -203
  43. package/umd/index.umd.js.map +1 -1
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts CHANGED
@@ -1,21 +1,30 @@
+ import type { string_user_id } from '../../../types/typeAliases';
  import type { LlmToolsConfiguration } from '../../_common/register/LlmToolsConfiguration';
+ import type { CollectionRemoteServerClientOptions } from './RemoteServerOptions';
  /**
  * Socket.io progress for remote text generation
  *
  * This is a request from client to server
  */
- export type PromptbookServer_ListModels_Request = PromptbookServer_ListModels_CollectionRequest | PromptbookServer_ListModels_AnonymousRequest;
- export type PromptbookServer_ListModels_CollectionRequest = {
+ export type PromptbookServer_ListModels_Request<TCustomOptions> = PromptbookServer_ListModels_CollectionRequest<TCustomOptions> | PromptbookServer_ListModels_AnonymousRequest;
+ export type PromptbookServer_ListModels_CollectionRequest<TCustomOptions> = CollectionRemoteServerClientOptions<TCustomOptions> & {
  /**
- * Collection mode
+ * Application mode
  */
- isAnonymous: false;
+ readonly isAnonymous: false;
  };
  export type PromptbookServer_ListModels_AnonymousRequest = {
  /**
  * Anonymous mode
  */
- isAnonymous: true;
+ readonly isAnonymous: true;
+ /**
+ * Identifier of the end user
+ *
+ * Note: this is passed to the certain model providers to identify misuse
+ * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
+ */
+ readonly userId: string_user_id | null;
  /**
  * Configuration for the LLM tools
  */
@@ -24,6 +33,6 @@ export type PromptbookServer_ListModels_AnonymousRequest = {
  /**
  * TODO: [👡] DRY `PromptbookServer_Prompt_Request` and `PromptbookServer_ListModels_Request`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
- * TODO: [🧠][🤺] Pass `userId` in `PromptbookServer_ListModels_Request`
+ * TODO: [🧠][🤺] Maybe allow overriding of `userId` for each prompt - Pass `userId` in `PromptbookServer_ListModels_Request`
  * TODO: [👒] Listing models (and checking configuration) probbably should go through REST API not Socket.io
  */
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts CHANGED
@@ -8,8 +8,8 @@ export interface PromptbookServer_ListModels_Response {
  /**
  * Available models that can be used
  */
- models: Array<AvailableModel>;
+ readonly models: Array<AvailableModel>;
  }
  /**
  * TODO: [👒] Listing models (and checking configuration) probbably should go through REST API not Socket.io
- */
+ */
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Request.d.ts CHANGED
@@ -1,24 +1,18 @@
  import type { Prompt } from '../../../types/Prompt';
  import type { string_user_id } from '../../../types/typeAliases';
  import type { LlmToolsConfiguration } from '../../_common/register/LlmToolsConfiguration';
+ import type { CollectionRemoteServerClientOptions } from './RemoteServerOptions';
  /**
  * Socket.io progress for remote text generation
  *
  * This is a request from client to server
  */
- export type PromptbookServer_Prompt_Request = PromptbookServer_Prompt_CollectionRequest | PromptbookServer_Prompt_AnonymousRequest;
- export type PromptbookServer_Prompt_CollectionRequest = {
+ export type PromptbookServer_Prompt_Request<TCustomOptions> = PromptbookServer_Prompt_CollectionRequest<TCustomOptions> | PromptbookServer_Prompt_AnonymousRequest;
+ export type PromptbookServer_Prompt_CollectionRequest<TCustomOptions> = CollectionRemoteServerClientOptions<TCustomOptions> & {
  /**
- * Collection mode
+ * Application mode
  */
  readonly isAnonymous: false;
- /**
- * Identifier of the end user
- *
- * Note: this is passed to the certain model providers to identify misuse
- * Note: In anonymous mode it is not required to identify
- */
- readonly userId: string_user_id;
  /**
  * The Prompt to execute
  */
@@ -33,9 +27,9 @@ export type PromptbookServer_Prompt_AnonymousRequest = {
  * Identifier of the end user
  *
  * Note: this is passed to the certain model providers to identify misuse
- * Note: In anonymous mode it is not required to identify
+ * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode)
  */
- readonly userId?: string_user_id;
+ readonly userId: string_user_id | null;
  /**
  * Configuration for the LLM tools
  */
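
Both `PromptbookServer_ListModels_Request` and `PromptbookServer_Prompt_Request` are now generic over `TCustomOptions` and build their application-mode variant on `CollectionRemoteServerClientOptions`, while the anonymous variant carries an explicit (nullable) `userId`. A minimal sketch of the two payload shapes, assuming the request types and `Prompt` are importable from `@promptbook/types` and that the `prompt` / `llmToolsConfiguration` property names behind the comments shown above are unchanged:

```ts
import type { Prompt, PromptbookServer_Prompt_Request } from '@promptbook/types';

declare const examplePrompt: Prompt; // prepared elsewhere

// Application mode: the CollectionRemoteServerClientOptions fields ride along with the prompt
const applicationRequest: PromptbookServer_Prompt_Request<{ tenant: string }> = {
    isAnonymous: false,
    appId: 'my-app',
    userId: 42, // `string_user_id` is now the widened `id` alias, so numbers are accepted
    customOptions: { tenant: 'acme' },
    prompt: examplePrompt, // assumed property name ("The Prompt to execute")
};

// Anonymous mode: `userId` is required but may be null
const anonymousRequest: PromptbookServer_Prompt_Request<undefined> = {
    isAnonymous: true,
    userId: null,
    llmToolsConfiguration: [], // assumed property name and shape ("Configuration for the LLM tools")
    prompt: examplePrompt,
};
```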
package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Prompt_Response.d.ts CHANGED
@@ -8,5 +8,5 @@ export interface PromptbookServer_Prompt_Response {
  /**
  * The result of the prompt
  */
- promptResult: PromptResult;
+ readonly promptResult: PromptResult;
  }
package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts CHANGED
@@ -3,12 +3,13 @@ import type { string_base_url } from '../../../types/typeAliases';
  import type { string_uri } from '../../../types/typeAliases';
  import type { string_user_id } from '../../../types/typeAliases';
  import type { LlmToolsConfiguration } from '../../_common/register/LlmToolsConfiguration';
+ import type { CollectionRemoteServerClientOptions } from './RemoteServerOptions';
  /**
  * Options for `RemoteLlmExecutionTools`
  *
  * @public exported from `@promptbook/remote-client`
  */
- export type RemoteLlmExecutionToolsOptions = CommonToolsOptions & {
+ export type RemoteLlmExecutionToolsOptions<TCustomOptions> = CommonToolsOptions & {
  /**
  * URL of the remote PROMPTBOOK server
  * On this server will be connected to the socket.io server
@@ -29,7 +30,7 @@ export type RemoteLlmExecutionToolsOptions = CommonToolsOptions & {
  /**
  * Use anonymous server with anonymous mode
  */
- isAnonymous: true;
+ readonly isAnonymous: true;
  /**
  * Configuration for the LLM tools
  */
@@ -37,22 +38,16 @@ export type RemoteLlmExecutionToolsOptions = CommonToolsOptions & {
  /**
  * Identifier of the end user
  *
- * Note: this is passed to the certain model providers to identify misuse
- * Note: In anonymous mode it is not required to identify
+ * Note: This is passed to the certain model providers to identify misuse
+ * Note: In anonymous mode, there is no need to identify yourself, nor does it change the actual configuration of LLM Tools (unlike in application mode).
  */
- readonly userId?: string_user_id;
- } | {
+ readonly userId: string_user_id | null;
+ } | ({
  /**
  * Use anonymous server with client identification and fixed collection
  */
- isAnonymous: false;
- /**
- * Identifier of the end user
- *
- * Note: this is passed to the certain model providers to identify misuse
- */
- readonly userId: string_user_id;
- });
+ readonly isAnonymous: false;
+ } & CollectionRemoteServerClientOptions<TCustomOptions>));
  /**
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
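
On the client, `RemoteLlmExecutionToolsOptions<TCustomOptions>` now distinguishes the two modes the same way: anonymous clients bring their own configuration and a nullable `userId`, application clients pass the `CollectionRemoteServerClientOptions` fields. A hedged sketch — the `remoteUrl` and `path` names come from the TODO note above; the configuration property name and the import source are assumptions:

```ts
import type { RemoteLlmExecutionToolsOptions } from '@promptbook/types';

// Application mode: identify the client via appId / userId / customOptions
const applicationOptions: RemoteLlmExecutionToolsOptions<{ tenant: string }> = {
    remoteUrl: 'https://promptbook.example.com',
    path: '/promptbook/socket.io',
    isAnonymous: false,
    appId: 'my-app',
    userId: 'user-123',
    customOptions: { tenant: 'acme' },
};

// Anonymous mode: pass your own LLM configuration; `userId` must be present but may be null
const anonymousOptions: RemoteLlmExecutionToolsOptions<undefined> = {
    remoteUrl: 'https://promptbook.example.com',
    path: '/promptbook/socket.io',
    isAnonymous: true,
    userId: null,
    llmToolsConfiguration: [], // assumed property name ("Configuration for the LLM tools")
};
```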
package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts CHANGED
@@ -1,6 +1,7 @@
  import type { PipelineCollection } from '../../../collection/PipelineCollection';
  import type { CommonToolsOptions } from '../../../execution/CommonToolsOptions';
  import type { LlmExecutionTools } from '../../../execution/LlmExecutionTools';
+ import type { string_app_id } from '../../../types/typeAliases';
  import type { string_uri } from '../../../types/typeAliases';
  import type { string_user_id } from '../../../types/typeAliases';
  /**
@@ -8,7 +9,7 @@ import type { string_user_id } from '../../../types/typeAliases';
  *
  * There are two modes of remote server:
  *
- * 1) **Collection mode** Server will recieve `collection` and execute prompts only from this collection
+ * 1) **Application mode** Server will recieve `collection` and execute prompts only from this collection
  * 2) **Anonymous mode** Server will recieve full `LlmToolsConfiguration` (with api keys) and just acts as a proxy
  * In anonymous mode, `collection` will be ignored and any prompt will be executed
  *
@@ -17,7 +18,7 @@ import type { string_user_id } from '../../../types/typeAliases';
  * @public exported from `@promptbook/remote-client`
  * @public exported from `@promptbook/remote-server`
  */
- export type RemoteServerOptions = CommonToolsOptions & {
+ export type RemoteServerOptions<TCustomOptions> = CommonToolsOptions & {
  /**
  * Port on which the server will listen
  */
@@ -29,18 +30,18 @@ export type RemoteServerOptions = CommonToolsOptions & {
  * @example '/promptbook/socket.io'
  */
  readonly path: string_uri;
- } & (AnonymousRemoteServerOptions | CollectionRemoteServerOptions | (AnonymousRemoteServerOptions & CollectionRemoteServerOptions));
+ } & (AnonymousRemoteServerOptions | CollectionRemoteServerOptions<TCustomOptions> | (AnonymousRemoteServerOptions & CollectionRemoteServerOptions<TCustomOptions>));
  export type AnonymousRemoteServerOptions = {
  /**
  * Enable anonymous mode
  */
  readonly isAnonymousModeAllowed: true;
  };
- export type CollectionRemoteServerOptions = {
+ export type CollectionRemoteServerOptions<TCustomOptions> = {
  /**
- * Enable collection mode
+ * Enable application mode
  */
- readonly isCollectionModeAllowed: true;
+ readonly isApplicationModeAllowed: true;
  /**
  * Promptbook collection to use
  *
@@ -50,9 +51,22 @@ export type CollectionRemoteServerOptions = {
  /**
  * Creates llm execution tools for each client
  */
- createLlmExecutionTools(userId: string_user_id | undefined): LlmExecutionTools;
+ createLlmExecutionTools(options: CollectionRemoteServerClientOptions<TCustomOptions>): LlmExecutionTools;
+ };
+ export type CollectionRemoteServerClientOptions<TCustomOptions> = {
+ /**
+ * @@@
+ */
+ readonly appId: string_app_id | null;
+ /**
+ * @@@
+ */
+ readonly userId: string_user_id | null;
+ /**
+ * @@@
+ */
+ readonly customOptions?: TCustomOptions;
  };
  /**
  * TODO: Constrain anonymous mode for specific models / providers
- * TODO: [🧠][🤺] Remove `createLlmExecutionTools`, pass just `llmExecutionTools`
  */
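
The server-side counterpart renames `isCollectionModeAllowed` to `isApplicationModeAllowed` and replaces the bare `userId` argument of `createLlmExecutionTools` with the whole `CollectionRemoteServerClientOptions<TCustomOptions>` object. A sketch of an application-mode handler, assuming these option types and the core types are importable from `@promptbook/types` and that the collection property behind "Promptbook collection to use" is simply named `collection`:

```ts
import type { CollectionRemoteServerOptions, LlmExecutionTools, PipelineCollection } from '@promptbook/types';

type MyCustomOptions = { tenant: string };

declare const collection: PipelineCollection; // prepared elsewhere
declare function createTenantLlmTools(tenant: string | undefined): LlmExecutionTools; // hypothetical factory

const applicationModeOptions: CollectionRemoteServerOptions<MyCustomOptions> = {
    isApplicationModeAllowed: true,
    collection, // assumed property name ("Promptbook collection to use")
    // Before this change the callback received only `(userId)`; now it gets the full client options
    createLlmExecutionTools({ appId, userId, customOptions }) {
        console.info(`Creating LLM tools for app=${appId}, user=${userId}`);
        return createTenantLlmTools(customOptions?.tenant);
    },
};
```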
package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts CHANGED
@@ -9,7 +9,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  * @see https://github.com/webgptorg/promptbook#remote-server
  * @public exported from `@promptbook/remote-server`
  */
- export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
+ export declare function startRemoteServer<TCustomOptions = undefined>(options: RemoteServerOptions<TCustomOptions>): IDestroyable;
  /**
  * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
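
`startRemoteServer` now carries the `TCustomOptions` generic from `RemoteServerOptions` into `createLlmExecutionTools`; since it defaults to `undefined`, existing anonymous-mode setups need no type argument. A minimal sketch — only fields visible in this diff are shown, and the numeric `port` value plus the `destroy()` method name follow common conventions rather than this diff:

```ts
import { startRemoteServer } from '@promptbook/remote-server';

const server = startRemoteServer({
    port: 4460, // assumed port value
    path: '/promptbook/socket.io',
    isAnonymousModeAllowed: true,
});

// The returned IDestroyable can be torn down when the process shuts down
server.destroy();
```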
package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts CHANGED
@@ -6,6 +6,6 @@ import type { ScraperSourceHandler } from '../Scraper';
  /**
  * @@@
  *
- * @private for scraper utilities
+ * @public exported from `@promptbook/core`
  */
  export declare function makeKnowledgeSourceHandler(knowledgeSource: SetOptional<KnowledgeSourceJson, 'name'>, tools: Pick<ExecutionTools, 'fs'>, options?: Pick<PrepareAndScrapeOptions, 'rootDirname' | 'isVerbose'>): Promise<ScraperSourceHandler>;
package/esm/typings/src/scripting/javascript/JavascriptEvalExecutionTools.d.ts CHANGED
@@ -9,7 +9,7 @@ import type { JavascriptExecutionToolsOptions } from './JavascriptExecutionTools
  * @public exported from `@promptbook/execute-javascript`
  */
  export declare class JavascriptEvalExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: JavascriptExecutionToolsOptions;
  constructor(options?: JavascriptExecutionToolsOptions);
  /**
  * Executes a JavaScript
package/esm/typings/src/scripting/python/PythonExecutionTools.d.ts CHANGED
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
  * @private still in development
  */
  export declare class PythonExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: CommonToolsOptions;
  constructor(options?: CommonToolsOptions);
  /**
  * Executes a Python
package/esm/typings/src/scripting/typescript/TypescriptExecutionTools.d.ts CHANGED
@@ -9,7 +9,7 @@ import type { ScriptExecutionToolsExecuteOptions } from '../../execution/ScriptE
  * @private still in development
  */
  export declare class TypescriptExecutionTools implements ScriptExecutionTools {
- private readonly options;
+ protected readonly options: CommonToolsOptions;
  constructor(options?: CommonToolsOptions);
  /**
  * Executes a TypeScript
package/esm/typings/src/storage/file-cache-storage/FileCacheStorage.d.ts CHANGED
@@ -7,7 +7,7 @@ import type { FileCacheStorageOptions } from './FileCacheStorageOptions';
  * @public exported from `@promptbook/node`
  */
  export declare class FileCacheStorage<TItem> implements PromptbookStorage<TItem> {
- private readonly tools;
+ protected readonly tools: Required<Pick<ExecutionTools, 'fs'>>;
  private readonly options;
  constructor(tools: Required<Pick<ExecutionTools, 'fs'>>, options: FileCacheStorageOptions);
  /**
package/esm/typings/src/types/Prompt.d.ts CHANGED
@@ -107,4 +107,5 @@ export type CommonPrompt = {
  * TODO: [🧄] Replace all "github.com/webgptorg/promptbook#xxx" with "ptbk.io/xxx"
  * TODO: [✔] Check ModelRequirements in runtime
  * TODO: [ðŸģ] Add options for translation - maybe create `TranslationPrompt`
+ * TODO: [🧠][🤺] Maybe allow overriding of `userId` for each prompt
  */
package/esm/typings/src/types/typeAliases.d.ts CHANGED
@@ -350,12 +350,6 @@ export type string_uri = string;
  * For example `"9SeSQTupmQHwuSrLi"`
  */
  export type string_uri_part = string;
- /**
- * Semantic helper, ID of the board used in URL and API
- *
- * For example `"9SeSQTupmQHwuSrLi"` <- TODO: !! Update
- */
- export type string_uriid = string_uri_part;
  /**
  * Semantic helper
  *
@@ -399,9 +393,17 @@ export type string_uuid = string & {
  readonly _type: 'uuid';
  };
  /**
- * End user identifier;
+ * Application identifier
+ *
+ * @@@
+ */
+ export type string_app_id = id;
+ /**
+ * End user identifier
+ *
+ * @@@
  */
- export type string_user_id = string;
+ export type string_user_id = id;
  /**
  * Semantic helper
  *
@@ -523,6 +525,7 @@ export type string_javascript_name = string;
  * Semantic helper; For example "unwrapResult" or "spaceTrim"
  */
  export type string_postprocessing_function_name = string;
+ export type id = string | number;
  export type string_token = string;
  export type string_license_token = string_token;
  export type string_password = string;
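
With the new `id = string | number` alias, `string_app_id` and the previously string-only `string_user_id` both accept numeric identifiers, and the `string_uriid` alias is removed. A tiny sketch, assuming both aliases are among the types re-exported from `@promptbook/types`:

```ts
import type { string_app_id, string_user_id } from '@promptbook/types';

const appId: string_app_id = 'my-app'; // string identifiers still work
const userId: string_user_id = 42; // numeric database keys are now valid too
```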
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/legacy-documents",
- "version": "0.71.0-17",
+ "version": "0.72.0-11",
  "description": "Supercharge your use of large language models",
  "private": false,
  "sideEffects": false,
@@ -51,7 +51,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.71.0-17"
+ "@promptbook/core": "0.72.0-11"
  },
  "dependencies": {
  "colors": "1.4.0",