@promptbook/markdown-utils 0.88.0 → 0.89.0-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +116 -83
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/types.index.d.ts +16 -4
  6. package/esm/typings/src/cli/cli-commands/login.d.ts +15 -0
  7. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +2 -2
  8. package/esm/typings/src/execution/PromptResult.d.ts +2 -2
  9. package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts} +5 -5
  10. package/esm/typings/src/execution/utils/addUsage.d.ts +2 -2
  11. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +3 -3
  12. package/esm/typings/src/execution/utils/usage-constants.d.ts +77 -60
  13. package/esm/typings/src/execution/utils/usageToHuman.d.ts +5 -5
  14. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +5 -5
  15. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +9 -2
  16. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts} +1 -1
  17. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts +2 -2
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +2 -2
  19. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +0 -9
  20. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts +2 -2
  21. package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts +2 -2
  22. package/esm/typings/src/playground/BrjappConnector.d.ts +67 -0
  23. package/esm/typings/src/playground/brjapp-api-schema.d.ts +12879 -0
  24. package/esm/typings/src/playground/playground.d.ts +5 -0
  25. package/esm/typings/src/remote-server/socket-types/_subtypes/PromptbookServer_Identification.d.ts +2 -1
  26. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +15 -3
  27. package/esm/typings/src/types/typeAliases.d.ts +8 -2
  28. package/package.json +1 -1
  29. package/umd/index.umd.js +116 -83
  30. package/umd/index.umd.js.map +1 -1
package/esm/typings/src/_packages/core.index.d.ts

@@ -66,6 +66,8 @@ import type { ExecutionReportStringOptions } from '../execution/execution-report
  import { ExecutionReportStringOptionsDefaults } from '../execution/execution-report/ExecutionReportStringOptions';
  import { addUsage } from '../execution/utils/addUsage';
  import { isPassingExpectations } from '../execution/utils/checkExpectations';
+ import { ZERO_VALUE } from '../execution/utils/usage-constants';
+ import { UNCERTAIN_ZERO_VALUE } from '../execution/utils/usage-constants';
  import { ZERO_USAGE } from '../execution/utils/usage-constants';
  import { UNCERTAIN_USAGE } from '../execution/utils/usage-constants';
  import { usageToHuman } from '../execution/utils/usageToHuman';
@@ -87,7 +89,7 @@ import { $llmToolsMetadataRegister } from '../llm-providers/_common/register/$ll
  import { $llmToolsRegister } from '../llm-providers/_common/register/$llmToolsRegister';
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/register/createLlmToolsFromConfiguration';
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
- import { countTotalUsage } from '../llm-providers/_common/utils/count-total-usage/countTotalUsage';
+ import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
  import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
  import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
@@ -196,6 +198,8 @@ export type { ExecutionReportStringOptions };
  export { ExecutionReportStringOptionsDefaults };
  export { addUsage };
  export { isPassingExpectations };
+ export { ZERO_VALUE };
+ export { UNCERTAIN_ZERO_VALUE };
  export { ZERO_USAGE };
  export { UNCERTAIN_USAGE };
  export { usageToHuman };
@@ -217,7 +221,7 @@ export { $llmToolsMetadataRegister };
  export { $llmToolsRegister };
  export { createLlmToolsFromConfiguration };
  export { cacheLlmTools };
- export { countTotalUsage };
+ export { countUsage };
  export { limitTotalUsage };
  export { _AnthropicClaudeMetadataRegistration };
  export { _AzureOpenAiMetadataRegistration };

package/esm/typings/src/_packages/types.index.d.ts

@@ -54,11 +54,11 @@ import type { PromptResult } from '../execution/PromptResult';
  import type { CompletionPromptResult } from '../execution/PromptResult';
  import type { ChatPromptResult } from '../execution/PromptResult';
  import type { EmbeddingPromptResult } from '../execution/PromptResult';
- import type { PromptResultUsage } from '../execution/PromptResultUsage';
- import type { PromptResultUsageCounts } from '../execution/PromptResultUsage';
  import type { ScriptExecutionTools } from '../execution/ScriptExecutionTools';
  import type { ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
  import type { UncertainNumber } from '../execution/UncertainNumber';
+ import type { Usage } from '../execution/Usage';
+ import type { UsageCounts } from '../execution/Usage';
  import type { UserInterfaceTools } from '../execution/UserInterfaceTools';
  import type { UserInterfaceToolsPromptDialogOptions } from '../execution/UserInterfaceTools';
  import type { FormatSubvalueDefinition } from '../formats/_common/FormatSubvalueDefinition';
@@ -106,6 +106,11 @@ import type { ScriptTaskJson } from '../pipeline/PipelineJson/ScriptTaskJson';
  import type { SimpleTaskJson } from '../pipeline/PipelineJson/SimpleTaskJson';
  import type { TaskJson } from '../pipeline/PipelineJson/TaskJson';
  import type { PipelineString } from '../pipeline/PipelineString';
+ import type { paths } from '../playground/brjapp-api-schema';
+ import type { webhooks } from '../playground/brjapp-api-schema';
+ import type { components } from '../playground/brjapp-api-schema';
+ import type { $defs } from '../playground/brjapp-api-schema';
+ import type { operations } from '../playground/brjapp-api-schema';
  import type { PrepareAndScrapeOptions } from '../prepare/PrepareAndScrapeOptions';
  import type { PromptbookServer_Identification } from '../remote-server/socket-types/_subtypes/PromptbookServer_Identification';
  import type { PromptbookServer_ApplicationIdentification } from '../remote-server/socket-types/_subtypes/PromptbookServer_Identification';
@@ -174,6 +179,7 @@ import type { string_markdown_text } from '../types/typeAliases';
  import type { string_markdown_codeblock_language } from '../types/typeAliases';
  import type { string_promptbook_documentation_url } from '../types/typeAliases';
  import type { string_domain } from '../types/typeAliases';
+ import type { string_origin } from '../types/typeAliases';
  import type { string_tdl } from '../types/typeAliases';
  import type { string_css } from '../types/typeAliases';
  import type { string_svg } from '../types/typeAliases';
@@ -337,11 +343,11 @@ export type { PromptResult };
  export type { CompletionPromptResult };
  export type { ChatPromptResult };
  export type { EmbeddingPromptResult };
- export type { PromptResultUsage };
- export type { PromptResultUsageCounts };
  export type { ScriptExecutionTools };
  export type { ScriptExecutionToolsExecuteOptions };
  export type { UncertainNumber };
+ export type { Usage };
+ export type { UsageCounts };
  export type { UserInterfaceTools };
  export type { UserInterfaceToolsPromptDialogOptions };
  export type { FormatSubvalueDefinition };
@@ -389,6 +395,11 @@ export type { ScriptTaskJson };
  export type { SimpleTaskJson };
  export type { TaskJson };
  export type { PipelineString };
+ export type { paths };
+ export type { webhooks };
+ export type { components };
+ export type { $defs };
+ export type { operations };
  export type { PrepareAndScrapeOptions };
  export type { PromptbookServer_Identification };
  export type { PromptbookServer_ApplicationIdentification };
@@ -457,6 +468,7 @@ export type { string_markdown_text };
  export type { string_markdown_codeblock_language };
  export type { string_promptbook_documentation_url };
  export type { string_domain };
+ export type { string_origin };
  export type { string_tdl };
  export type { string_css };
  export type { string_svg };

package/esm/typings/src/cli/cli-commands/login.d.ts

@@ -0,0 +1,15 @@
+ import type { Command as Program } from 'commander';
+ /**
+ * Initializes `login` command for Promptbook CLI utilities
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it registers a command in the CLI
+ *
+ * @private internal function of `promptbookCli`
+ */
+ export declare function $initializeLoginCommand(program: Program): void;
+ /**
+ * TODO: Pass remote server URL (and path)
+ * TODO: Implement non-interactive login
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
+ */
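The new `login` command ships only as a registration hook. A hypothetical wiring sketch follows (the `program` name and entry-point flow are illustrative, not taken from the package; the function itself is marked `@private` to `promptbookCli`):

```ts
import { Command } from 'commander';

// Shape copied from the declaration above; the real import lives inside `@promptbook/cli`.
declare function $initializeLoginCommand(program: Command): void;

const program = new Command('promptbook');
$initializeLoginCommand(program); // registers the interactive `login` subcommand on the shared program
program.parse(process.argv);
```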
package/esm/typings/src/execution/PipelineExecutorResult.d.ts

@@ -3,7 +3,7 @@ import type { PipelineJson } from '../pipeline/PipelineJson/PipelineJson';
  import type { Parameters } from '../types/typeAliases';
  import type { AbstractTaskResult } from './AbstractTaskResult';
  import type { ExecutionReportJson } from './execution-report/ExecutionReportJson';
- import type { PromptResultUsage } from './PromptResultUsage';
+ import type { Usage } from './Usage';
  /**
  * @@@
  *
@@ -19,7 +19,7 @@ export type PipelineExecutorResult = AbstractTaskResult & {
  /**
  * Added usage of whole execution, detailed usage is available in `executionReport`
  */
- readonly usage: ReadonlyDeep<PromptResultUsage>;
+ readonly usage: ReadonlyDeep<Usage>;
  /**
  * The report of the execution with all details
  */

package/esm/typings/src/execution/PromptResult.d.ts

@@ -3,7 +3,7 @@ import type { string_model_name } from '../types/typeAliases';
  import type { string_prompt } from '../types/typeAliases';
  import type { TODO_object } from '../utils/organization/TODO_object';
  import type { EmbeddingVector } from './EmbeddingVector';
- import type { PromptResultUsage } from './PromptResultUsage';
+ import type { Usage } from './Usage';
  /**
  * Prompt result is the simplest concept of execution.
  * It is the result of executing one prompt _(NOT a template)_.
@@ -71,7 +71,7 @@ export type CommonPromptResult = {
  /**
  * Usage of the prompt execution
  */
- readonly usage: PromptResultUsage;
+ readonly usage: Usage;
  /**
  * Exact text of the prompt (with all replacements)
  *

package/esm/typings/src/execution/{PromptResultUsage.d.ts → Usage.d.ts}

@@ -2,11 +2,11 @@ import type { KebabCase } from 'type-fest';
  import type { ExpectationUnit } from '../pipeline/PipelineJson/Expectations';
  import type { UncertainNumber } from './UncertainNumber';
  /**
- * Usage statistics for one or many prompt results
+ * Usage statistics for one or more prompt results
  *
  * Note: [🚉] This is fully serializable as JSON
  */
- export type PromptResultUsage = {
+ export type Usage = {
  /**
  * Cost of the execution in USD
  *
@@ -16,18 +16,18 @@ export type PromptResultUsage = {
  /**
  * Number of whatever used in the input aka. `prompt_tokens`
  */
- readonly input: PromptResultUsageCounts;
+ readonly input: UsageCounts;
  /**
  * Number of tokens used in the output aka. `completion_tokens`
  */
- readonly output: PromptResultUsageCounts;
+ readonly output: UsageCounts;
  };
  /**
  * Record of all possible measurable units
  *
  * Note: [🚉] This is fully serializable as JSON
  */
- export type PromptResultUsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
+ export type UsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
  /**
  * TODO: [🍙] Make some standard order of json properties
  */
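The central rename in this release: `PromptResultUsage` / `PromptResultUsageCounts` become `Usage` / `UsageCounts` with an unchanged shape. A minimal migration sketch, assuming consumers pick the types up via the `@promptbook/types` entry point that `types.index.d.ts` above feeds:

```ts
// Before (0.88.0):
// import type { PromptResultUsage, PromptResultUsageCounts } from '@promptbook/types';

// After (0.89.0-2) - same shape, new names:
import type { Usage, UsageCounts } from '@promptbook/types';

// Per the declarations above, every count is an `UncertainNumber` keyed as `<unit>Count`:
type InputCounts = Usage['input'];             // = UsageCounts
type TokensCount = UsageCounts['tokensCount']; // = UncertainNumber
```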
package/esm/typings/src/execution/utils/addUsage.d.ts

@@ -1,4 +1,4 @@
- import type { PromptResultUsage } from '../PromptResultUsage';
+ import type { Usage } from '../Usage';
  /**
  * Function `addUsage` will add multiple usages into one
  *
@@ -6,4 +6,4 @@ import type { PromptResultUsage } from '../PromptResultUsage';
  *
  * @public exported from `@promptbook/core`
  */
- export declare function addUsage(...usageItems: ReadonlyArray<PromptResultUsage>): PromptResultUsage;
+ export declare function addUsage(...usageItems: ReadonlyArray<Usage>): Usage;
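A small usage sketch for the renamed signature, assuming `addUsage`, `ZERO_USAGE` and `UNCERTAIN_USAGE` all resolve from `@promptbook/core`, as their `@public exported from '@promptbook/core'` annotations indicate:

```ts
import { addUsage, UNCERTAIN_USAGE, ZERO_USAGE } from '@promptbook/core';

// `addUsage` is variadic and folds any number of usage records into one total;
// ZERO_USAGE acts as a neutral starting point, UNCERTAIN_USAGE stands in here for a real `promptResult.usage`.
const totalUsage = addUsage(ZERO_USAGE, UNCERTAIN_USAGE);
console.info(totalUsage.price.value); // 0
```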
package/esm/typings/src/execution/utils/computeUsageCounts.d.ts

@@ -1,10 +1,10 @@
- import type { PromptResultUsageCounts } from '../PromptResultUsage';
+ import type { UsageCounts } from '../Usage';
  /**
  * Helper of usage compute
  *
  * @param content the content of prompt or response
- * @returns part of PromptResultUsageCounts
+ * @returns part of UsageCounts
  *
  * @private internal utility of LlmExecutionTools
  */
- export declare function computeUsageCounts(content: string): Omit<PromptResultUsageCounts, 'tokensCount'>;
+ export declare function computeUsageCounts(content: string): Omit<UsageCounts, 'tokensCount'>;

package/esm/typings/src/execution/utils/usage-constants.d.ts

@@ -1,57 +1,74 @@
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ export declare const ZERO_VALUE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ readonly value: 0;
+ }>;
+ /**
+ * Represents the uncertain value
+ *
+ * @public exported from `@promptbook/core`
+ */
+ export declare const UNCERTAIN_ZERO_VALUE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
+ readonly value: 0;
+ readonly isUncertain: true;
+ }>;
  /**
  * Represents the usage with no resources consumed
  *
  * @public exported from `@promptbook/core`
  */
  export declare const ZERO_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
- readonly price: {
+ readonly price: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
+ }>;
  readonly input: {
- readonly tokensCount: {
+ readonly tokensCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly charactersCount: {
+ }>;
+ readonly charactersCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly wordsCount: {
+ }>;
+ readonly wordsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly sentencesCount: {
+ }>;
+ readonly sentencesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly linesCount: {
+ }>;
+ readonly linesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly paragraphsCount: {
+ }>;
+ readonly paragraphsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly pagesCount: {
+ }>;
+ readonly pagesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
+ }>;
  };
  readonly output: {
- readonly tokensCount: {
+ readonly tokensCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly charactersCount: {
+ }>;
+ readonly charactersCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly wordsCount: {
+ }>;
+ readonly wordsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly sentencesCount: {
+ }>;
+ readonly sentencesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly linesCount: {
+ }>;
+ readonly linesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly paragraphsCount: {
+ }>;
+ readonly paragraphsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
- readonly pagesCount: {
+ }>;
+ readonly pagesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
- };
+ }>;
  };
  }>;
  /**
@@ -60,69 +77,69 @@ export declare const ZERO_USAGE: import("type-fest/source/readonly-deep").Readon
  * @public exported from `@promptbook/core`
  */
  export declare const UNCERTAIN_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
- readonly price: {
+ readonly price: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
+ }>;
  readonly input: {
- readonly tokensCount: {
+ readonly tokensCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly charactersCount: {
+ }>;
+ readonly charactersCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly wordsCount: {
+ }>;
+ readonly wordsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly sentencesCount: {
+ }>;
+ readonly sentencesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly linesCount: {
+ }>;
+ readonly linesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly paragraphsCount: {
+ }>;
+ readonly paragraphsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly pagesCount: {
+ }>;
+ readonly pagesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
+ }>;
  };
  readonly output: {
- readonly tokensCount: {
+ readonly tokensCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly charactersCount: {
+ }>;
+ readonly charactersCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly wordsCount: {
+ }>;
+ readonly wordsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly sentencesCount: {
+ }>;
+ readonly sentencesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly linesCount: {
+ }>;
+ readonly linesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly paragraphsCount: {
+ }>;
+ readonly paragraphsCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
- readonly pagesCount: {
+ }>;
+ readonly pagesCount: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
  readonly value: 0;
  readonly isUncertain: true;
- };
+ }>;
  };
  }>;
  /**

package/esm/typings/src/execution/utils/usageToHuman.d.ts

@@ -1,20 +1,20 @@
  import type { string_markdown } from '../../types/typeAliases';
- import type { PromptResultUsage } from '../PromptResultUsage';
  import type { UncertainNumber } from '../UncertainNumber';
+ import type { Usage } from '../Usage';
  /**
  * Minimal usage information required to calculate worktime
  */
- type PartialPromptResultUsage = Partial<PromptResultUsage> & {
+ type PartialUsage = Partial<Usage> & {
  price: UncertainNumber;
- input: Pick<PromptResultUsage['input'], 'wordsCount'>;
- output: Pick<PromptResultUsage['output'], 'wordsCount' | 'charactersCount'>;
+ input: Pick<Usage['input'], 'wordsCount'>;
+ output: Pick<Usage['output'], 'wordsCount' | 'charactersCount'>;
  };
  /**
  * Function `usageToHuman` will take usage and convert it to human readable report
  *
  * @public exported from `@promptbook/core`
  */
- export declare function usageToHuman(usage: PartialPromptResultUsage): string_markdown;
+ export declare function usageToHuman(usage: PartialUsage): string_markdown;
  export {};
  /**
  * TODO: [🍓][🧞‍♂️] Use "$1" not "1 USD"

package/esm/typings/src/execution/utils/usageToWorktime.d.ts

@@ -1,11 +1,11 @@
- import type { PromptResultUsage } from '../PromptResultUsage';
  import type { UncertainNumber } from '../UncertainNumber';
+ import type { Usage } from '../Usage';
  /**
  * Minimal usage information required to calculate worktime
  */
- type PartialPromptResultUsage = Pick<PromptResultUsage, 'input' | 'output'> & {
- input: Pick<PromptResultUsage['input'], 'wordsCount'>;
- output: Pick<PromptResultUsage['output'], 'wordsCount'>;
+ type PartialUsage = Pick<Usage, 'input' | 'output'> & {
+ input: Pick<Usage['input'], 'wordsCount'>;
+ output: Pick<Usage['output'], 'wordsCount'>;
  };
  /**
  * Function usageToWorktime will take usage and estimate saved worktime in hours of reading / writing
@@ -16,5 +16,5 @@ type PartialPromptResultUsage = Pick<PromptResultUsage, 'input' | 'output'> & {
  *
  * @public exported from `@promptbook/core`
  */
- export declare function usageToWorktime(usage: PartialPromptResultUsage): UncertainNumber;
+ export declare function usageToWorktime(usage: PartialUsage): UncertainNumber;
  export {};
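Both helpers keep their signatures; only the accepted type name changes. A hedged sketch, assuming they are imported from `@promptbook/core` per their `@public` annotations and fed with `ZERO_USAGE` just to have a well-typed value:

```ts
import { usageToHuman, usageToWorktime, ZERO_USAGE } from '@promptbook/core';

const report = usageToHuman(ZERO_USAGE);       // markdown report of price and counts
const savedTime = usageToWorktime(ZERO_USAGE); // UncertainNumber: estimated hours of reading/writing saved
console.info(report, savedTime.value);
```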
package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts

@@ -1,5 +1,6 @@
+ import type { Observable } from 'rxjs';
  import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
- import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
+ import type { Usage } from '../../../../execution/Usage';
  /**
  * LLM tools with option to get total usage of the execution
  */
@@ -7,7 +8,13 @@ export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
  /**
  * Get total cost of the execution up to this point
  */
- getTotalUsage(): PromptResultUsage;
+ getTotalUsage(): Usage;
+ /**
+ * Observable of total cost of the execution up to this point
+ *
+ * Note: This does report the cost of the last prompt, not the total cost of the execution up to this point
+ */
+ spending(): Observable<Usage>;
  };
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools

package/esm/typings/src/llm-providers/_common/utils/count-total-usage/{countTotalUsage.d.ts → countUsage.d.ts}

@@ -7,7 +7,7 @@ import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTot
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
- export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
+ export declare function countUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
  /**
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
  * TODO: [🧠] Is there some meaningfull way how to test this util
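The wrapper is unchanged apart from the rename and the new `spending()` observable. An illustrative sketch, assuming `countUsage` is imported from `@promptbook/core` (per its `@public` annotation) and `LlmExecutionTools` from `@promptbook/types`; `existingLlmTools` is a placeholder for tools you already construct elsewhere:

```ts
import { countUsage } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

declare const existingLlmTools: LlmExecutionTools; // <- placeholder, not part of the diff

// Formerly `countTotalUsage(existingLlmTools)`
const llmToolsWithUsage = countUsage(existingLlmTools);

// New in 0.89: push-based usage reporting (see the Note above on per-prompt vs. total semantics)
const subscription = llmToolsWithUsage.spending().subscribe((usage) => {
    console.info('Reported usage in USD:', usage.price.value);
});

// Pull-based total is still available and now returns `Usage`
const total = llmToolsWithUsage.getTotalUsage();
console.info('Total so far in USD:', total.price.value);
subscription.unsubscribe();
```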
package/esm/typings/src/llm-providers/_common/utils/count-total-usage/limitTotalUsage.d.ts

@@ -1,5 +1,5 @@
  import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
- import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
+ import type { Usage } from '../../../../execution/Usage';
  import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
  import type { TODO_any } from '../../../../utils/organization/TODO_any';
  import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
@@ -12,7 +12,7 @@ type LimitTotalUsageOptions = {
  *
  * @default ZERO_USAGE
  */
- maxTotalUsage: PromptResultUsage;
+ maxTotalUsage: Usage;
  /**
  * @@@
  *

package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts

@@ -1,6 +1,6 @@
  import type Anthropic from '@anthropic-ai/sdk';
  import type { PartialDeep } from 'type-fest';
- import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+ import type { Usage } from '../../execution/Usage';
  import type { Prompt } from '../../types/Prompt';
  /**
  * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
@@ -12,7 +12,7 @@ import type { Prompt } from '../../types/Prompt';
  * @private internal utility of `AnthropicClaudeExecutionTools`
  */
  export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
+ resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): Usage;
  /**
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts

@@ -8,8 +8,6 @@ import type { Prompt } from '../../types/Prompt';
  import type { string_markdown } from '../../types/typeAliases';
  import type { string_markdown_text } from '../../types/typeAliases';
  import type { string_title } from '../../types/typeAliases';
- import type { string_token } from '../../types/typeAliases';
- import { OpenAiAssistantExecutionTools } from './OpenAiAssistantExecutionTools';
  import type { OpenAiExecutionToolsOptions } from './OpenAiExecutionToolsOptions';
  /**
  * Execution Tools for calling OpenAI API
@@ -31,13 +29,6 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
  get title(): string_title & string_markdown_text;
  get description(): string_markdown;
  getClient(): Promise<OpenAI>;
- /**
- * Create (sub)tools for calling OpenAI API Assistants
- *
- * @param assistantId Which assistant to use
- * @returns Tools for calling OpenAI API Assistants with same token
- */
- createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools;
  /**
  * Check the `options` passed to `constructor`
  */

package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.d.ts

@@ -1,6 +1,6 @@
  import type OpenAI from 'openai';
  import type { PartialDeep } from 'type-fest';
- import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+ import type { Usage } from '../../execution/Usage';
  import type { Prompt } from '../../types/Prompt';
  /**
  * Computes the usage of the OpenAI API based on the response from OpenAI
@@ -12,7 +12,7 @@ import type { Prompt } from '../../types/Prompt';
  * @private internal utility of `OpenAiExecutionTools`
  */
  export declare function computeOpenAiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
+ resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): Usage;
  /**
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */

package/esm/typings/src/pipeline/PipelineJson/PreparationJson.d.ts

@@ -1,4 +1,4 @@
- import type { PromptResultUsage } from '../../execution/PromptResultUsage';
+ import type { Usage } from '../../execution/Usage';
  import type { number_id } from '../../types/typeAliases';
  import type { string_promptbook_version } from '../../version';
  export type PreparationJson = {
@@ -13,7 +13,7 @@ export type PreparationJson = {
  /**
  * Usage of the prompt execution
  */
- readonly usage: PromptResultUsage;
+ readonly usage: Usage;
  };
  /**
  * TODO: [🍙] Make some standard order of json properties