@promptbook/markdown-utils 0.62.0-0 → 0.62.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  2. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  3. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  4. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  5. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  6. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  7. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  8. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  10. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  11. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  14. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  15. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  16. package/package.json +2 -2
  17. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  18. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  19. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  20. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  21. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  22. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  23. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  24. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  25. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  26. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  27. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  28. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  29. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  30. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  31. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
package/esm/typings/promptbook-collection/index.d.ts CHANGED
@@ -26,7 +26,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
@@ -113,7 +113,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
@@ -195,7 +195,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
package/esm/typings/src/execution/createPipelineExecutor.d.ts CHANGED
@@ -58,7 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
 export {};
 /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
- * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
+ * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts CHANGED
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
  */
 export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
 /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
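
In practice, the env-driven construction these typings describe looks roughly like this. This is a minimal sketch, not the documented API: the import specifier is an assumption (the TODO above notes that the export via `@promptbook/node` is still pending), and only the two environment variables named in the doc comment are shown.

// Minimal sketch, assuming `createLlmToolsFromEnv` is importable (import path is an assumption).
import { createLlmToolsFromEnv } from '@promptbook/core';

// Reads process.env.OPENAI_API_KEY and process.env.ANTHROPIC_CLAUDE_API_KEY
// and returns an LlmExecutionTools instance for whichever providers are configured.
// Note that the function is not cached: every call creates a new instance.
const llmTools = createLlmToolsFromEnv();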
package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts CHANGED
@@ -1,10 +1,19 @@
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForCliOptions = {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for CLI
  *
  * @private within the repository - for CLI utils
  */
-export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
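
The change is backward compatible at the call site, since the whole options bag is optional. A self-contained sketch of the new shape (the API is repository-private, and the meaning of `isCacheReloaded` is inferred from its name; its doc comment is still a `@@@` placeholder):

// Self-contained sketch of the 0.62.0 call shape (repository-private API).
type GetLlmToolsForCliOptions = {
    /** Assumed meaning: when true, recreate the cache instead of reusing it. @default false */
    isCacheReloaded?: boolean;
};
declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): unknown;

getLlmToolsForCli(); // the pre-0.62.0 zero-argument call still type-checks
getLlmToolsForCli({ isCacheReloaded: true }); // new: request a fresh cache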
package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts CHANGED
@@ -1,11 +1,20 @@
 import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for testing purposes
  *
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
  */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [⚪] This should never be in any released package
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts CHANGED
@@ -7,4 +7,10 @@ export type CacheLlmToolsOptions = {
      * @default MemoryStorage
      */
     storage: PromptbookStorage<CacheItem>;
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isReloaded?: boolean;
 };
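
The new `isReloaded` flag mirrors `isCacheReloaded` above and feeds the caching wrapper that consumes `CacheLlmToolsOptions`. A sketch under assumptions: the wrapper name `cacheLlmTools` and its exact signature are not shown in this diff.

// Sketch only; `cacheLlmTools` is an assumed name for the consumer of CacheLlmToolsOptions.
declare const llmTools: unknown; // some LlmExecutionTools instance
declare function cacheLlmTools(
    tools: unknown,
    options?: { storage?: unknown; isReloaded?: boolean },
): unknown;

// isReloaded: false (the default) would serve repeated prompts from storage;
// isReloaded: true would bypass stored results and write fresh ones.
const cachedTools = cacheLlmTools(llmTools, { isReloaded: true });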
package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts CHANGED
@@ -5,10 +5,11 @@ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage'
  */
 export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
     /**
-     * Total cost of the execution
+     * Get total cost of the execution up to this point
      */
-    totalUsage: PromptResultUsage;
+    getTotalUsage(): PromptResultUsage;
 };
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  */
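
This is the one breaking change in this wrapper's surface: the `totalUsage` property becomes a `getTotalUsage()` method (per the note above, to keep the object proxy-friendly). A minimal migration sketch, with `unknown` standing in for the real `PromptResultUsage` type:

// Sketch of the migration from the property to the method.
declare const toolsWithUsage: { getTotalUsage(): unknown };

// Before 0.62.0: const usage = toolsWithUsage.totalUsage;
// From 0.62.0:
const usage = toolsWithUsage.getTotalUsage();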
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts CHANGED
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts CHANGED
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  */
 export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
+/**
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
+ */
package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts CHANGED
@@ -1,5 +1,6 @@
 #!/usr/bin/env ts-node
 export {};
 /**
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
  */
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts CHANGED
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
 /**
  * TODO: [🍓] Allow to list compatible models with each variant
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- */
+ * TODO: [🍜] Add anonymous option
+ */
package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts CHANGED
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
      */
     createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
 };
+/**
+ * TODO: [🍜] Add anonymous option
+ */
package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts CHANGED
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  */
 export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
 /**
+ * TODO: [🍜] Add anonymous option
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts CHANGED
@@ -13,7 +13,7 @@ export type PreparationJson = {
     /**
      * Usage of the prompt execution
      */
-    readonly modelUsage: PromptResultUsage;
+    readonly usage: PromptResultUsage;
 };
 /**
  * TODO: [🍙] Make some standart order of json properties
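
This rename in `PreparationJson` is what drives the repeated `modelUsage` → `usage` hunks in the generated collection typings at the top of this diff. A minimal sketch of the renamed member, trimmed for illustration:

// PreparationJson reduced to the renamed member only.
type PreparationJson = {
    readonly usage: unknown; // named `modelUsage` before 0.62.0
};
declare const preparation: PreparationJson;
const preparationUsage = preparation.usage; // previously: preparation.modelUsage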
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/markdown-utils",
-    "version": "0.62.0-0",
+    "version": "0.62.0",
     "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
         }
     ],
     "peerDependencies": {
-        "@promptbook/core": "0.62.0-0"
+        "@promptbook/core": "0.62.0"
     },
     "main": "./umd/index.umd.js",
     "module": "./esm/index.es.js",
package/umd/typings/promptbook-collection/index.d.ts CHANGED
@@ -26,7 +26,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
@@ -113,7 +113,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
@@ -195,7 +195,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
             price: {
                 value: number;
             };
package/umd/typings/src/execution/createPipelineExecutor.d.ts CHANGED
@@ -58,7 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
 export {};
 /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
- * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
+ * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts CHANGED
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
  */
 export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
 /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts CHANGED
@@ -1,10 +1,19 @@
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForCliOptions = {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for CLI
  *
  * @private within the repository - for CLI utils
  */
-export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts CHANGED
@@ -1,11 +1,20 @@
 import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for testing purposes
  *
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
  */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [⚪] This should never be in any released package
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts CHANGED
@@ -7,4 +7,10 @@ export type CacheLlmToolsOptions = {
      * @default MemoryStorage
      */
     storage: PromptbookStorage<CacheItem>;
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isReloaded?: boolean;
 };
package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts CHANGED
@@ -5,10 +5,11 @@ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage'
  */
 export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
     /**
-     * Total cost of the execution
+     * Get total cost of the execution up to this point
      */
-    totalUsage: PromptResultUsage;
+    getTotalUsage(): PromptResultUsage;
 };
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  */
package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts CHANGED
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts CHANGED
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  */
 export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
+/**
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
+ */
package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts CHANGED
@@ -1,5 +1,6 @@
 #!/usr/bin/env ts-node
 export {};
 /**
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
  */
package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts CHANGED
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
 /**
  * TODO: [🍓] Allow to list compatible models with each variant
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- */
+ * TODO: [🍜] Add anonymous option
+ */
package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts CHANGED
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
      */
     createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
 };
+/**
+ * TODO: [🍜] Add anonymous option
+ */
package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts CHANGED
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  */
 export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
 /**
+ * TODO: [🍜] Add anonymous option
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts CHANGED
@@ -13,7 +13,7 @@ export type PreparationJson = {
     /**
      * Usage of the prompt execution
      */
-    readonly modelUsage: PromptResultUsage;
+    readonly usage: PromptResultUsage;
 };
 /**
  * TODO: [🍙] Make some standart order of json properties