@promptbook/markdown-utils 0.61.0 → 0.62.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  2. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  3. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  4. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  5. package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  6. package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  7. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  8. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  9. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  10. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  11. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  12. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  13. package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  14. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  17. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  19. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  21. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  22. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  23. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  24. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  25. package/package.json +3 -3
  26. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  27. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  28. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  29. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  30. package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  31. package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  32. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  33. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  34. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  35. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  36. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  37. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  38. package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  39. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  40. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  41. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  42. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  43. package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  44. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  45. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  46. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  47. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  48. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
  49. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
  51. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
@@ -26,7 +26,7 @@ declare const _default: ({
26
26
  preparations: {
27
27
  id: number;
28
28
  promptbookVersion: string;
29
- modelUsage: {
29
+ usage: {
30
30
  price: {
31
31
  value: number;
32
32
  };
@@ -113,7 +113,7 @@ declare const _default: ({
113
113
  preparations: {
114
114
  id: number;
115
115
  promptbookVersion: string;
116
- modelUsage: {
116
+ usage: {
117
117
  price: {
118
118
  value: number;
119
119
  };
@@ -195,7 +195,7 @@ declare const _default: ({
195
195
  preparations: {
196
196
  id: number;
197
197
  promptbookVersion: string;
198
- modelUsage: {
198
+ usage: {
199
199
  price: {
200
200
  value: number;
201
201
  };
@@ -26,6 +26,7 @@ import { embeddingVectorToString } from '../execution/embeddingVectorToString';
26
26
  import { addUsage } from '../execution/utils/addUsage';
27
27
  import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
28
28
  import { usageToWorktime } from '../execution/utils/usageToWorktime';
29
+ import { usageToHuman } from '../execution/utils/usageToHuman';
29
30
  import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackInterfaceTools';
30
31
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
31
32
  import { SimplePromptInterfaceTools } from '../knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools';
@@ -39,7 +40,7 @@ import { executionReportJsonToString } from '../types/execution-report/execution
39
40
  import { PROMPTBOOK_VERSION } from '../version';
40
41
  export { PROMPTBOOK_VERSION };
41
42
  export { BlockTypes, RESERVED_PARAMETER_NAMES };
42
- export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
43
+ export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, usageToHuman, };
43
44
  export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };
44
45
  export { SimplePromptInterfaceTools };
45
46
  export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, stringifyPipelineJson, validatePipeline, };
@@ -10,5 +10,6 @@ export declare function pipelineJsonToString(pipelineJson: PipelineJson): Pipeli
10
10
  /**
11
11
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
12
12
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
13
- * TODO: Escape all
13
+ * TODO: [🏛] Maybe make some markdown builder
14
+ * TODO: [🏛] Escape all
14
15
  */
@@ -58,6 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
58
58
  export {};
59
59
  /**
60
60
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
61
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipeline to report
61
62
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
62
63
  * TODO: [♈] Probably move expectations from templates to parameters
63
64
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -0,0 +1,15 @@
1
+ import type { string_markdown } from '../../types/typeAliases';
2
+ import type { PromptResultUsage } from '../PromptResultUsage';
3
+ /**
4
+ * Function `usageToHuman` will take usage and convert it to human readable report
5
+ */
6
+ export declare function usageToHuman(usage: PromptResultUsage): string_markdown;
7
+ /**
8
+ * TODO: Use "$1" not "1 USD"
9
+ * TODO: Use markdown formatting like "Cost approximately **$1**"
10
+ * TODO: Report in minutes, seconds, days NOT 0.1 hours
11
+ * TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
12
+ * TODO: When negligible usage, report "Negligible" or just don't report it
13
+ * TODO: [🧠] Maybe use "~" instead of "approximately"
14
+ * TODO: [🏛] Maybe make some markdown builder
15
+ */
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
12
12
  *
13
13
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
14
14
  *
15
+ * @@@ .env
16
+ *
15
17
  * It looks for environment variables:
16
18
  * - `process.env.OPENAI_API_KEY`
17
19
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
20
22
  */
21
23
  export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
22
24
  /**
25
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
23
26
  * TODO: [🔼] !!! Export via `@promptbook/node`
24
27
  * TODO: @@@ write discussion about this - wizard
25
28
  * TODO: Add Azure
@@ -27,4 +30,5 @@ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOpt
27
30
  * TODO: [🧠] Is there some meaningful way to test this util
28
31
  * TODO: [🧠] Maybe pass env as argument
29
32
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
33
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
30
34
  */
@@ -1,10 +1,20 @@
1
- import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
1
+ import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
2
+ type GetLlmToolsForCliOptions = {
3
+ /**
4
+ * @@@
5
+ *
6
+ * @default false
7
+ */
8
+ isCacheReloaded?: boolean;
9
+ };
2
10
  /**
3
11
  * Returns LLM tools for CLI
4
12
  *
5
13
  * @private within the repository - for CLI utils
6
14
  */
7
- export declare function getLlmToolsForCli(): LlmExecutionTools;
15
+ export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
16
+ export {};
8
17
  /**
9
18
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
19
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
10
20
  */
@@ -1,11 +1,21 @@
1
- import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
1
  import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
2
+ import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
3
+ type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
4
+ /**
5
+ * @@@
6
+ *
7
+ * @default false
8
+ */
9
+ isCacheReloaded?: boolean;
10
+ };
3
11
  /**
4
12
  * Returns LLM tools for testing purposes
5
13
  *
6
14
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
7
15
  */
8
- export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
16
+ export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
17
+ export {};
9
18
  /**
10
19
  * Note: [⚪] This should never be in any released package
20
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
11
21
  */
@@ -2,9 +2,15 @@ import type { PromptbookStorage } from '../../../../storage/_common/PromptbookSt
2
2
  import type { CacheItem } from './CacheItem';
3
3
  export type CacheLlmToolsOptions = {
4
4
  /**
5
- * Total cost of the execution
5
+ * @@@
6
6
  *
7
7
  * @default MemoryStorage
8
8
  */
9
9
  storage: PromptbookStorage<CacheItem>;
10
+ /**
11
+ * @@@
12
+ *
13
+ * @default false
14
+ */
15
+ isReloaded?: boolean;
10
16
  };
@@ -3,14 +3,17 @@ import type { CacheLlmToolsOptions } from './CacheLlmToolsOptions';
3
3
  /**
4
4
  * Intercepts LLM tools and counts total usage of the tools
5
5
  *
6
- * @param llmTools LLM tools to be intercepted with usage counting
6
+ * Note: It can take extended `LlmExecutionTools` and cache them
7
+ *
8
+ * @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
7
9
  * @returns LLM tools with same functionality with added total cost counting
8
10
  */
9
- export declare function cacheLlmTools(llmTools: LlmExecutionTools, options?: Partial<CacheLlmToolsOptions>): LlmExecutionTools;
11
+ export declare function cacheLlmTools<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, options?: Partial<CacheLlmToolsOptions>): TLlmTools;
10
12
  /**
11
13
  * TODO: [🔼] !!! Export via `@promptbook/core`
12
- * TODO: @@@ write discussion about this and storages
13
- * write how to combine multiple interceptors
14
14
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
15
15
  * TODO: [🧠] Is there some meaningful way to test this util
16
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
17
+ * @@@ write discussion about this and storages
18
+ * @@@ write how to combine multiple interceptors
16
19
  */
@@ -0,0 +1,15 @@
1
+ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
+ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
+ /**
4
+ * LLM tools with option to get total usage of the execution
5
+ */
6
+ export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
7
+ /**
8
+ * Get total cost of the execution up to this point
9
+ */
10
+ getTotalUsage(): PromptResultUsage;
11
+ };
12
+ /**
13
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
14
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
15
+ */
@@ -1,14 +1,17 @@
1
1
  import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
- import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
2
+ import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
3
3
  /**
4
4
  * Intercepts LLM tools and counts total usage of the tools
5
5
  *
6
6
  * @param llmTools LLM tools to be intercepted with usage counting
7
7
  * @returns LLM tools with same functionality with added total cost counting
8
8
  */
9
- export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalCost;
9
+ export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
10
10
  /**
11
11
  * TODO: [🔼] !!! Export via `@promptbookcore/`
12
12
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
13
13
  * TODO: [🧠] Is there some meaningful way to test this util
14
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
15
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
16
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
14
17
  */
@@ -2,17 +2,17 @@ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools'
2
2
  import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
3
  import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
4
4
  import type { TODO_any } from '../../../../utils/organization/TODO_any';
5
- import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
5
+ import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
6
6
  /**
7
- * Options for `limitTotalCost`
7
+ * Options for `limitTotalUsage`
8
8
  */
9
- type LimitTotalCostOptions = {
9
+ type LimitTotalUsageOptions = {
10
10
  /**
11
11
  * @@@
12
12
  *
13
13
  * @default ZERO_USAGE
14
14
  */
15
- maxTotalCost: PromptResultUsage;
15
+ maxTotalUsage: PromptResultUsage;
16
16
  /**
17
17
  * @@@
18
18
  *
@@ -23,10 +23,13 @@ type LimitTotalCostOptions = {
23
23
  /**
24
24
  * @@@
25
25
  */
26
- export declare function limitTotalCost(llmTools: LlmExecutionTools, options?: Partial<LimitTotalCostOptions>): LlmExecutionToolsWithTotalCost;
26
+ export declare function limitTotalUsage(llmTools: LlmExecutionTools, options?: Partial<LimitTotalUsageOptions>): LlmExecutionToolsWithTotalUsage;
27
27
  export {};
28
28
  /**
29
29
  * TODO: [🔼] !!! Export via `@promptbookcore/`
30
+ * TODO: Maybe internally use `countTotalUsage`
30
31
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
31
32
  * TODO: [🧠] Is there some meaningful way to test this util
33
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
34
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
32
35
  */
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
46
46
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
47
47
  * TODO: Maybe make custom OpenaiError
48
48
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
49
+ * TODO: [🍜] Auto use anonymous server in browser
49
50
  */
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
6
6
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
7
7
  */
8
8
  export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
9
+ /**
10
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
11
+ */
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
4
5
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
5
6
  */
@@ -14,3 +14,6 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
14
14
  * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
15
15
  */
16
16
  export declare function joinLlmExecutionTools(...llmExecutionTools: Array<LlmExecutionTools>): MultipleLlmExecutionTools;
17
+ /**
18
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
19
+ */
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
51
51
  /**
52
52
  * TODO: [🍓] Allow to list compatible models with each variant
53
53
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
54
- */
54
+ * TODO: [🍜] Add anonymous option
55
+ */
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
26
26
  */
27
27
  createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
28
28
  };
29
+ /**
30
+ * TODO: [🍜] Add anonymous option
31
+ */
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
10
10
  */
11
11
  export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
12
12
  /**
13
+ * TODO: [🍜] Add anonymous option
13
14
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
14
15
  * TODO: Handle progress - support streaming
15
16
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
@@ -12,6 +12,5 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
12
12
  * TODO: Write tests for `preparePipeline`
13
13
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
14
14
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
15
- * TODO: [🎐] !!!!! Use here countTotalUsage
16
15
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
17
16
  */
@@ -13,7 +13,7 @@ export type PreparationJson = {
13
13
  /**
14
14
  * Usage of the prompt execution
15
15
  */
16
- readonly modelUsage: PromptResultUsage;
16
+ readonly usage: PromptResultUsage;
17
17
  };
18
18
  /**
19
19
  * TODO: [🍙] Make some standard order of json properties
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/markdown-utils",
3
- "version": "0.61.0",
3
+ "version": "0.62.0-1",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -34,7 +34,7 @@
34
34
  },
35
35
  "homepage": "https://www.npmjs.com/package/@promptbook/core",
36
36
  "dependencies": {
37
- "spacetrim": "0.11.37"
37
+ "spacetrim": "0.11.39"
38
38
  },
39
39
  "funding": [
40
40
  {
@@ -47,7 +47,7 @@
47
47
  }
48
48
  ],
49
49
  "peerDependencies": {
50
- "@promptbook/core": "0.61.0"
50
+ "@promptbook/core": "0.62.0-1"
51
51
  },
52
52
  "main": "./umd/index.umd.js",
53
53
  "module": "./esm/index.es.js",
@@ -26,7 +26,7 @@ declare const _default: ({
26
26
  preparations: {
27
27
  id: number;
28
28
  promptbookVersion: string;
29
- modelUsage: {
29
+ usage: {
30
30
  price: {
31
31
  value: number;
32
32
  };
@@ -113,7 +113,7 @@ declare const _default: ({
113
113
  preparations: {
114
114
  id: number;
115
115
  promptbookVersion: string;
116
- modelUsage: {
116
+ usage: {
117
117
  price: {
118
118
  value: number;
119
119
  };
@@ -195,7 +195,7 @@ declare const _default: ({
195
195
  preparations: {
196
196
  id: number;
197
197
  promptbookVersion: string;
198
- modelUsage: {
198
+ usage: {
199
199
  price: {
200
200
  value: number;
201
201
  };
@@ -26,6 +26,7 @@ import { embeddingVectorToString } from '../execution/embeddingVectorToString';
26
26
  import { addUsage } from '../execution/utils/addUsage';
27
27
  import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
28
28
  import { usageToWorktime } from '../execution/utils/usageToWorktime';
29
+ import { usageToHuman } from '../execution/utils/usageToHuman';
29
30
  import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackInterfaceTools';
30
31
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
31
32
  import { SimplePromptInterfaceTools } from '../knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools';
@@ -39,7 +40,7 @@ import { executionReportJsonToString } from '../types/execution-report/execution
39
40
  import { PROMPTBOOK_VERSION } from '../version';
40
41
  export { PROMPTBOOK_VERSION };
41
42
  export { BlockTypes, RESERVED_PARAMETER_NAMES };
42
- export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
43
+ export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, usageToHuman, };
43
44
  export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };
44
45
  export { SimplePromptInterfaceTools };
45
46
  export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, stringifyPipelineJson, validatePipeline, };
@@ -10,5 +10,6 @@ export declare function pipelineJsonToString(pipelineJson: PipelineJson): Pipeli
10
10
  /**
11
11
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
12
12
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
13
- * TODO: Escape all
13
+ * TODO: [🏛] Maybe make some markdown builder
14
+ * TODO: [🏛] Escape all
14
15
  */
@@ -58,6 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
58
58
  export {};
59
59
  /**
60
60
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
61
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipeline to report
61
62
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
62
63
  * TODO: [♈] Probably move expectations from templates to parameters
63
64
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -0,0 +1,15 @@
1
+ import type { string_markdown } from '../../types/typeAliases';
2
+ import type { PromptResultUsage } from '../PromptResultUsage';
3
+ /**
4
+ * Function `usageToHuman` will take usage and convert it to human readable report
5
+ */
6
+ export declare function usageToHuman(usage: PromptResultUsage): string_markdown;
7
+ /**
8
+ * TODO: Use "$1" not "1 USD"
9
+ * TODO: Use markdown formatting like "Cost approximately **$1**"
10
+ * TODO: Report in minutes, seconds, days NOT 0.1 hours
11
+ * TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
12
+ * TODO: When negligible usage, report "Negligible" or just don't report it
13
+ * TODO: [🧠] Maybe use "~" instead of "approximately"
14
+ * TODO: [🏛] Maybe make some markdown builder
15
+ */
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
12
12
  *
13
13
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
14
14
  *
15
+ * @@@ .env
16
+ *
15
17
  * It looks for environment variables:
16
18
  * - `process.env.OPENAI_API_KEY`
17
19
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
20
22
  */
21
23
  export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
22
24
  /**
25
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
23
26
  * TODO: [🔼] !!! Export via `@promptbook/node`
24
27
  * TODO: @@@ write discussion about this - wizard
25
28
  * TODO: Add Azure
@@ -27,4 +30,5 @@ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOpt
27
30
  * TODO: [🧠] Is there some meaningful way to test this util
28
31
  * TODO: [🧠] Maybe pass env as argument
29
32
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
33
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
30
34
  */
@@ -1,10 +1,20 @@
1
- import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
1
+ import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
2
+ type GetLlmToolsForCliOptions = {
3
+ /**
4
+ * @@@
5
+ *
6
+ * @default false
7
+ */
8
+ isCacheReloaded?: boolean;
9
+ };
2
10
  /**
3
11
  * Returns LLM tools for CLI
4
12
  *
5
13
  * @private within the repository - for CLI utils
6
14
  */
7
- export declare function getLlmToolsForCli(): LlmExecutionTools;
15
+ export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
16
+ export {};
8
17
  /**
9
18
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
19
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
10
20
  */
@@ -1,11 +1,21 @@
1
- import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
1
  import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
2
+ import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
3
+ type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
4
+ /**
5
+ * @@@
6
+ *
7
+ * @default false
8
+ */
9
+ isCacheReloaded?: boolean;
10
+ };
3
11
  /**
4
12
  * Returns LLM tools for testing purposes
5
13
  *
6
14
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
7
15
  */
8
- export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
16
+ export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
17
+ export {};
9
18
  /**
10
19
  * Note: [⚪] This should never be in any released package
20
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
11
21
  */
@@ -2,9 +2,15 @@ import type { PromptbookStorage } from '../../../../storage/_common/PromptbookSt
2
2
  import type { CacheItem } from './CacheItem';
3
3
  export type CacheLlmToolsOptions = {
4
4
  /**
5
- * Total cost of the execution
5
+ * @@@
6
6
  *
7
7
  * @default MemoryStorage
8
8
  */
9
9
  storage: PromptbookStorage<CacheItem>;
10
+ /**
11
+ * @@@
12
+ *
13
+ * @default false
14
+ */
15
+ isReloaded?: boolean;
10
16
  };
@@ -3,14 +3,17 @@ import type { CacheLlmToolsOptions } from './CacheLlmToolsOptions';
3
3
  /**
4
4
  * Intercepts LLM tools and counts total usage of the tools
5
5
  *
6
- * @param llmTools LLM tools to be intercepted with usage counting
6
+ * Note: It can take extended `LlmExecutionTools` and cache them
7
+ *
8
+ * @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
7
9
  * @returns LLM tools with same functionality with added total cost counting
8
10
  */
9
- export declare function cacheLlmTools(llmTools: LlmExecutionTools, options?: Partial<CacheLlmToolsOptions>): LlmExecutionTools;
11
+ export declare function cacheLlmTools<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, options?: Partial<CacheLlmToolsOptions>): TLlmTools;
10
12
  /**
11
13
  * TODO: [🔼] !!! Export via `@promptbook/core`
12
- * TODO: @@@ write discussion about this and storages
13
- * write how to combine multiple interceptors
14
14
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
15
15
  * TODO: [🧠] Is there some meaningful way to test this util
16
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
17
+ * @@@ write discussion about this and storages
18
+ * @@@ write how to combine multiple interceptors
16
19
  */
@@ -0,0 +1,15 @@
1
+ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
+ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
+ /**
4
+ * LLM tools with option to get total usage of the execution
5
+ */
6
+ export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
7
+ /**
8
+ * Get total cost of the execution up to this point
9
+ */
10
+ getTotalUsage(): PromptResultUsage;
11
+ };
12
+ /**
13
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
14
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
15
+ */
@@ -1,14 +1,17 @@
1
1
  import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
- import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
2
+ import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
3
3
  /**
4
4
  * Intercepts LLM tools and counts total usage of the tools
5
5
  *
6
6
  * @param llmTools LLM tools to be intercepted with usage counting
7
7
  * @returns LLM tools with the same functionality with added total usage counting
8
8
  */
9
- export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalCost;
9
+ export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
10
10
  /**
11
11
  * TODO: [🔼] !!! Export via `@promptbookcore/`
12
12
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
13
13
  * TODO: [🧠] Is there some meaningful way to test this util
14
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
15
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
16
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
14
17
  */
@@ -2,17 +2,17 @@ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools'
2
2
  import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
3
  import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
4
4
  import type { TODO_any } from '../../../../utils/organization/TODO_any';
5
- import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
5
+ import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
6
6
  /**
7
- * Options for `limitTotalCost`
7
+ * Options for `limitTotalUsage`
8
8
  */
9
- type LimitTotalCostOptions = {
9
+ type LimitTotalUsageOptions = {
10
10
  /**
11
11
  * @@@
12
12
  *
13
13
  * @default ZERO_USAGE
14
14
  */
15
- maxTotalCost: PromptResultUsage;
15
+ maxTotalUsage: PromptResultUsage;
16
16
  /**
17
17
  * @@@
18
18
  *
@@ -23,10 +23,13 @@ type LimitTotalCostOptions = {
23
23
  /**
24
24
  * @@@
25
25
  */
26
- export declare function limitTotalCost(llmTools: LlmExecutionTools, options?: Partial<LimitTotalCostOptions>): LlmExecutionToolsWithTotalCost;
26
+ export declare function limitTotalUsage(llmTools: LlmExecutionTools, options?: Partial<LimitTotalUsageOptions>): LlmExecutionToolsWithTotalUsage;
27
27
  export {};
28
28
  /**
29
29
  * TODO: [🔼] !!! Export via `@promptbookcore/`
30
+ * TODO: Maybe internally use `countTotalUsage`
30
31
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
31
32
  * TODO: [🧠] Is there some meaningful way to test this util
33
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
34
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
32
35
  */
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
46
46
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
47
47
  * TODO: Maybe make custom OpenaiError
48
48
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
49
+ * TODO: [🍜] Auto use anonymous server in browser
49
50
  */
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
6
6
  * This extends Anthropic's `ClientOptions` which are directly passed to the Anthropic client.
7
7
  */
8
8
  export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
9
+ /**
10
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
11
+ */
@@ -1,5 +1,6 @@
1
1
  #!/usr/bin/env ts-node
2
2
  export {};
3
3
  /**
4
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
4
5
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
5
6
  */
@@ -14,3 +14,6 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
14
14
  * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
15
15
  */
16
16
  export declare function joinLlmExecutionTools(...llmExecutionTools: Array<LlmExecutionTools>): MultipleLlmExecutionTools;
17
+ /**
18
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
19
+ */
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
51
51
  /**
52
52
  * TODO: [🍓] Allow to list compatible models with each variant
53
53
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
54
- */
54
+ * TODO: [🍜] Add anonymous option
55
+ */
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
26
26
  */
27
27
  createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
28
28
  };
29
+ /**
30
+ * TODO: [🍜] Add anonymous option
31
+ */
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
10
10
  */
11
11
  export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
12
12
  /**
13
+ * TODO: [🍜] Add anonymous option
13
14
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
14
15
  * TODO: Handle progress - support streaming
15
16
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
@@ -12,6 +12,5 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
12
12
  * TODO: Write tests for `preparePipeline`
13
13
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
14
14
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
15
- * TODO: [🎐] !!!!! Use here countTotalUsage
16
15
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
17
16
  */
@@ -13,7 +13,7 @@ export type PreparationJson = {
13
13
  /**
14
14
  * Usage of the prompt execution
15
15
  */
16
- readonly modelUsage: PromptResultUsage;
16
+ readonly usage: PromptResultUsage;
17
17
  };
18
18
  /**
19
19
  * TODO: [🍙] Make some standard order of JSON properties
@@ -1,11 +0,0 @@
1
- import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
- import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
- /**
4
- * LLM tools with option to get total cost of the execution
5
- */
6
- export type LlmExecutionToolsWithTotalCost = LlmExecutionTools & {
7
- /**
8
- * Total cost of the execution
9
- */
10
- totalUsage: PromptResultUsage;
11
- };
@@ -1,11 +0,0 @@
1
- import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
- import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
- /**
4
- * LLM tools with option to get total cost of the execution
5
- */
6
- export type LlmExecutionToolsWithTotalCost = LlmExecutionTools & {
7
- /**
8
- * Total cost of the execution
9
- */
10
- totalUsage: PromptResultUsage;
11
- };