@promptbook/openai 0.61.0-12 → 0.61.0-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. package/esm/index.es.js +31 -13
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/execute-javascript.index.d.ts +1 -1
  4. package/esm/typings/src/_packages/types.index.d.ts +6 -3
  5. package/esm/typings/src/_packages/utils.index.d.ts +1 -1
  6. package/esm/typings/src/commands/MODEL/ModelCommand.d.ts +2 -2
  7. package/esm/typings/src/config.d.ts +5 -1
  8. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +13 -0
  9. package/esm/typings/src/conversion/validation/_importPipeline.d.ts +10 -1
  10. package/esm/typings/src/conversion/validation/validatePipeline.d.ts +1 -1
  11. package/esm/typings/src/errors/LimitReachedError.d.ts +7 -0
  12. package/esm/typings/src/execution/LlmExecutionTools.d.ts +7 -7
  13. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -1
  14. package/esm/typings/src/execution/PromptResult.d.ts +14 -56
  15. package/esm/typings/src/execution/PromptResultUsage.d.ts +26 -0
  16. package/esm/typings/src/execution/UncertainNumber.d.ts +18 -0
  17. package/esm/typings/src/execution/utils/addUsage.d.ts +58 -2
  18. package/esm/typings/src/execution/utils/computeUsageCounts.d.ts +1 -1
  19. package/esm/typings/src/execution/utils/uncertainNumber.d.ts +1 -1
  20. package/esm/typings/src/execution/utils/usageToWorktime.d.ts +2 -2
  21. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +10 -2
  22. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +2 -1
  23. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/limitTotalCost.d.ts +32 -0
  25. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -4
  27. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -4
  28. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +6 -6
  29. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -7
  30. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +6 -6
  31. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +9 -7
  33. package/esm/typings/src/scripting/javascript/utils/unknownToString.d.ts +2 -1
  34. package/esm/typings/src/types/ModelRequirements.d.ts +53 -14
  35. package/esm/typings/src/types/ModelVariant.d.ts +14 -0
  36. package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +2 -4
  37. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  38. package/esm/typings/src/types/Prompt.d.ts +45 -1
  39. package/esm/typings/src/types/typeAliases.d.ts +11 -0
  40. package/esm/typings/src/utils/deepClone.d.ts +9 -0
  41. package/esm/typings/src/utils/deepFreeze.d.ts +13 -0
  42. package/esm/typings/src/utils/normalization/parseKeywords.d.ts +2 -1
  43. package/{umd/typings/src/utils/organization/TODO.d.ts → esm/typings/src/utils/organization/TODO_any.d.ts} +1 -1
  44. package/esm/typings/src/utils/organization/TODO_object.d.ts +6 -0
  45. package/esm/typings/src/utils/organization/TODO_unknown.d.ts +6 -0
  46. package/esm/typings/src/utils/organization/just.d.ts +4 -1
  47. package/esm/typings/src/utils/organization/keepUnused.d.ts +16 -0
  48. package/esm/typings/src/utils/organization/really_any.d.ts +1 -1
  49. package/esm/typings/src/utils/organization/really_unknown.d.ts +6 -0
  50. package/esm/typings/src/utils/validators/email/isValidEmail.d.ts +2 -1
  51. package/esm/typings/src/utils/validators/filePath/isValidFilePath.d.ts +2 -1
  52. package/esm/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +2 -1
  53. package/esm/typings/src/utils/validators/semanticVersion/isValidPromptbookVersion.d.ts +2 -1
  54. package/esm/typings/src/utils/validators/semanticVersion/isValidSemanticVersion.d.ts +2 -1
  55. package/esm/typings/src/utils/validators/url/isValidPipelineUrl.d.ts +2 -1
  56. package/esm/typings/src/utils/validators/url/isValidUrl.d.ts +2 -1
  57. package/esm/typings/src/utils/validators/uuid/isValidUuid.d.ts +2 -1
  58. package/esm/typings/src/version.d.ts +0 -3
  59. package/package.json +2 -2
  60. package/umd/index.umd.js +31 -13
  61. package/umd/index.umd.js.map +1 -1
  62. package/umd/typings/src/_packages/execute-javascript.index.d.ts +1 -1
  63. package/umd/typings/src/_packages/types.index.d.ts +6 -3
  64. package/umd/typings/src/_packages/utils.index.d.ts +1 -1
  65. package/umd/typings/src/commands/MODEL/ModelCommand.d.ts +2 -2
  66. package/umd/typings/src/config.d.ts +5 -1
  67. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +13 -0
  68. package/umd/typings/src/conversion/validation/_importPipeline.d.ts +10 -1
  69. package/umd/typings/src/conversion/validation/validatePipeline.d.ts +1 -1
  70. package/umd/typings/src/errors/LimitReachedError.d.ts +7 -0
  71. package/umd/typings/src/execution/LlmExecutionTools.d.ts +7 -7
  72. package/umd/typings/src/execution/PipelineExecutor.d.ts +1 -1
  73. package/umd/typings/src/execution/PromptResult.d.ts +14 -56
  74. package/umd/typings/src/execution/PromptResultUsage.d.ts +26 -0
  75. package/umd/typings/src/execution/UncertainNumber.d.ts +18 -0
  76. package/umd/typings/src/execution/utils/addUsage.d.ts +58 -2
  77. package/umd/typings/src/execution/utils/computeUsageCounts.d.ts +1 -1
  78. package/umd/typings/src/execution/utils/uncertainNumber.d.ts +1 -1
  79. package/umd/typings/src/execution/utils/usageToWorktime.d.ts +2 -2
  80. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +10 -2
  81. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +2 -1
  82. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +1 -1
  83. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/limitTotalCost.d.ts +32 -0
  84. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  85. package/umd/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -4
  86. package/umd/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -4
  87. package/umd/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +6 -6
  88. package/umd/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -7
  89. package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +6 -6
  90. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +1 -1
  91. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +9 -7
  92. package/umd/typings/src/scripting/javascript/utils/unknownToString.d.ts +2 -1
  93. package/umd/typings/src/types/ModelRequirements.d.ts +53 -14
  94. package/umd/typings/src/types/ModelVariant.d.ts +14 -0
  95. package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +2 -4
  96. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  97. package/umd/typings/src/types/Prompt.d.ts +45 -1
  98. package/umd/typings/src/types/typeAliases.d.ts +11 -0
  99. package/umd/typings/src/utils/deepClone.d.ts +9 -0
  100. package/umd/typings/src/utils/deepFreeze.d.ts +13 -0
  101. package/umd/typings/src/utils/normalization/parseKeywords.d.ts +2 -1
  102. package/{esm/typings/src/utils/organization/TODO.d.ts → umd/typings/src/utils/organization/TODO_any.d.ts} +1 -1
  103. package/umd/typings/src/utils/organization/TODO_object.d.ts +6 -0
  104. package/umd/typings/src/utils/organization/TODO_unknown.d.ts +6 -0
  105. package/umd/typings/src/utils/organization/just.d.ts +4 -1
  106. package/umd/typings/src/utils/organization/keepUnused.d.ts +16 -0
  107. package/umd/typings/src/utils/organization/really_any.d.ts +1 -1
  108. package/umd/typings/src/utils/organization/really_unknown.d.ts +6 -0
  109. package/umd/typings/src/utils/validators/email/isValidEmail.d.ts +2 -1
  110. package/umd/typings/src/utils/validators/filePath/isValidFilePath.d.ts +2 -1
  111. package/umd/typings/src/utils/validators/javascriptName/isValidJavascriptName.d.ts +2 -1
  112. package/umd/typings/src/utils/validators/semanticVersion/isValidPromptbookVersion.d.ts +2 -1
  113. package/umd/typings/src/utils/validators/semanticVersion/isValidSemanticVersion.d.ts +2 -1
  114. package/umd/typings/src/utils/validators/url/isValidPipelineUrl.d.ts +2 -1
  115. package/umd/typings/src/utils/validators/url/isValidUrl.d.ts +2 -1
  116. package/umd/typings/src/utils/validators/uuid/isValidUuid.d.ts +2 -1
  117. package/umd/typings/src/version.d.ts +0 -3
  118. package/esm/typings/src/utils/organization/keepImported.d.ts +0 -12
  119. package/esm/typings/src/utils/organization/notUsing.d.ts +0 -12
  120. package/umd/typings/src/utils/organization/keepImported.d.ts +0 -12
  121. package/umd/typings/src/utils/organization/notUsing.d.ts +0 -12
  122. /package/esm/typings/src/{execution/addPromptResultUsage.test.d.ts → conversion/utils/stringifyPipelineJson.test.d.ts} +0 -0
  123. /package/esm/typings/src/{utils/postprocessing → postprocessing/utils}/extractBlock.d.ts +0 -0
  124. /package/umd/typings/src/{execution/addPromptResultUsage.test.d.ts → conversion/utils/stringifyPipelineJson.test.d.ts} +0 -0
  125. /package/umd/typings/src/{utils/postprocessing → postprocessing/utils}/extractBlock.d.ts +0 -0
@@ -0,0 +1,13 @@
1
+ import type { string_json } from '../../types/typeAliases';
2
+ /**
3
+ * Stringify the PipelineJson with proper formatting
4
+ *
5
+ * Note: [0] It can be used for more JSON types like whole collection of pipelines, single knowledge piece, etc.
6
+ * Note: In contrast to JSON.stringify, this function ensures that **embedding index** is on single line
7
+ */
8
+ export declare function stringifyPipelineJson<TType>(pipeline: TType): string_json<TType>;
9
+ /**
10
+ * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
11
+ * TODO: [🔼] Export alongside pipelineStringToJson
12
+ * TODO: [🧠] Maybe more elegant solution than replacing via regex
13
+ */
@@ -1,7 +1,8 @@
1
1
  import type { PipelineJson } from '../../types/PipelineJson/PipelineJson';
2
2
  import type { PipelineString } from '../../types/PipelineString';
3
+ import type { string_json } from '../../types/typeAliases';
3
4
  /**
4
- * Import the text file
5
+ * Import the pipeline.ptbk.md or pipeline.ptbk.json file
5
6
  *
6
7
  * Note: Using here custom import to work in jest tests
7
8
  * Note: Using sync version is 💩 in the production code, but it's ok here in tests
@@ -11,3 +12,11 @@ import type { PipelineString } from '../../types/PipelineString';
11
12
  */
12
13
  export declare function importPipelineWithoutPreparation(path: `${string}.ptbk.md`): PipelineString;
13
14
  export declare function importPipelineWithoutPreparation(path: `${string}.ptbk.json`): PipelineJson;
15
+ /**
16
+ * Import the pipeline.ptbk.json file as parsed JSON
17
+ */
18
+ export declare function importPipelineJson(path: `${string}.ptbk.json`): PipelineJson;
19
+ /**
20
+ * Import the pipeline.ptbk.json file as string
21
+ */
22
+ export declare function importPipelineJsonAsString(path: `${string}.ptbk.json`): string_json<PipelineJson>;
@@ -23,7 +23,7 @@ export declare function validatePipeline(pipeline: PipelineJson): PipelineJson;
23
23
  * > * It checks:
24
24
  * > * - it has a valid structure
25
25
  * > * - ...
26
- * > ex port function validatePipeline(promptbook: unknown): asserts promptbook is PipelineJson {
26
+ * > ex port function validatePipeline(promptbook: really_unknown): asserts promptbook is PipelineJson {
27
27
  */
28
28
  /**
29
29
  * TODO: [🧠][🐣] !!!! Validate that all samples match expectations
@@ -0,0 +1,7 @@
1
+ /**
2
+ * This error type indicates that some limit was reached
3
+ */
4
+ export declare class LimitReachedError extends Error {
5
+ readonly name = "LimitReachedError";
6
+ constructor(message: string);
7
+ }
@@ -1,13 +1,13 @@
1
1
  import type { Promisable } from 'type-fest';
2
- import type { ModelVariant } from '../types/ModelRequirements';
2
+ import type { ModelVariant } from '../types/ModelVariant';
3
3
  import type { Prompt } from '../types/Prompt';
4
4
  import type { string_markdown } from '../types/typeAliases';
5
5
  import type { string_markdown_text } from '../types/typeAliases';
6
6
  import type { string_model_name } from '../types/typeAliases';
7
7
  import type { string_title } from '../types/typeAliases';
8
- import type { PromptChatResult } from './PromptResult';
9
- import type { PromptCompletionResult } from './PromptResult';
10
- import type { PromptEmbeddingResult } from './PromptResult';
8
+ import type { ChatPromptResult } from './PromptResult';
9
+ import type { CompletionPromptResult } from './PromptResult';
10
+ import type { EmbeddingPromptResult } from './PromptResult';
11
11
  /**
12
12
  * Container for all the tools needed to execute prompts to large language models like GPT-4
13
13
  * On its interface it exposes common methods for prompt execution.
@@ -31,15 +31,15 @@ export type LlmExecutionTools = {
31
31
  /**
32
32
  * Calls a chat model
33
33
  */
34
- callChatModel?(prompt: Prompt): Promise<PromptChatResult>;
34
+ callChatModel?(prompt: Prompt): Promise<ChatPromptResult>;
35
35
  /**
36
36
  * Calls a completion model
37
37
  */
38
- callCompletionModel?(prompt: Prompt): Promise<PromptCompletionResult>;
38
+ callCompletionModel?(prompt: Prompt): Promise<CompletionPromptResult>;
39
39
  /**
40
40
  * Calls an embedding model
41
41
  */
42
- callEmbeddingModel?(prompt: Prompt): Promise<PromptEmbeddingResult>;
42
+ callEmbeddingModel?(prompt: Prompt): Promise<EmbeddingPromptResult>;
43
43
  /**
44
44
  * List all available models that can be used
45
45
  */
@@ -3,7 +3,7 @@ import type { TaskProgress } from '../types/TaskProgress';
3
3
  import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
4
4
  import type { string_parameter_name } from '../types/typeAliases';
5
5
  import type { string_parameter_value } from '../types/typeAliases';
6
- import type { PromptResultUsage } from './PromptResult';
6
+ import type { PromptResultUsage } from './PromptResultUsage';
7
7
  /**
8
8
  * Executor is a simple async function that takes INPUT PARAMETERs and returns result parameters _(along with all intermediate parameters and INPUT PARAMETERs = it extends input object)_.
9
9
  * Executor is made by combining execution tools and pipeline collection.
@@ -1,37 +1,34 @@
1
- import type { KebabCase } from 'type-fest';
2
- import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
3
- import type { number_positive } from '../types/typeAliases';
4
- import type { number_usd } from '../types/typeAliases';
5
1
  import type { string_date_iso8601 } from '../types/typeAliases';
6
2
  import type { string_model_name } from '../types/typeAliases';
3
+ import type { TODO_object } from '../utils/organization/TODO_object';
7
4
  import type { EmbeddingVector } from './EmbeddingVector';
5
+ import type { PromptResultUsage } from './PromptResultUsage';
8
6
  /**
9
7
  * Prompt result is the simplest concept of execution.
10
8
  * It is the result of executing one prompt _(NOT a template)_.
11
9
  *
12
10
  * @see https://github.com/webgptorg/promptbook#prompt-result
13
11
  */
14
- export type PromptResult = PromptCompletionResult | PromptChatResult | PromptEmbeddingResult;
12
+ export type PromptResult = CompletionPromptResult | ChatPromptResult | EmbeddingPromptResult;
15
13
  /**
16
- * Prompt completion result
17
- * It contains only the following text NOT the whole completion
14
+ * Completion prompt result
18
15
  *
16
+ * Note:It contains only the newly generated text NOT the whole completion
19
17
  * Note: This is fully serializable as JSON
20
18
  */
21
- export type PromptCompletionResult = PromptCommonResult;
19
+ export type CompletionPromptResult = CommonPromptResult;
22
20
  /**
23
- * Prompt chat result
21
+ *Chat prompt result
24
22
  *
25
23
  * Note: This is fully serializable as JSON
26
24
  */
27
- export type PromptChatResult = PromptCommonResult & {};
25
+ export type ChatPromptResult = CommonPromptResult & {};
28
26
  /**
29
- * Prompt embedding result
30
- * It contains only the following text NOT the whole completion
27
+ * Embedding prompt result
31
28
  *
32
29
  * Note: This is fully serializable as JSON
33
30
  */
34
- export type PromptEmbeddingResult = Omit<PromptCommonResult, 'content'> & {
31
+ export type EmbeddingPromptResult = Omit<CommonPromptResult, 'content'> & {
35
32
  /**
36
33
  * The response from the model
37
34
  */
@@ -42,7 +39,7 @@ export type PromptEmbeddingResult = Omit<PromptCommonResult, 'content'> & {
42
39
  *
43
40
  * Note: This is fully serializable as JSON
44
41
  */
45
- export type PromptCommonResult = {
42
+ export type CommonPromptResult = {
46
43
  /**
47
44
  * Exact text response from the model
48
45
  */
@@ -75,52 +72,13 @@ export type PromptCommonResult = {
75
72
  /**
76
73
  * Raw response from the model
77
74
  */
78
- readonly rawResponse: object;
79
- };
80
- /**
81
- * Usage statistics for one or many prompt results
82
- */
83
- export type PromptResultUsage = {
84
- /**
85
- * Cost of the execution in USD
86
- *
87
- * Note: If the cost is unknown, the value 0 and isUncertain is true
88
- */
89
- price: UncertainNumber;
90
- /**
91
- * Number of whatever used in the input aka. `prompt_tokens`
92
- */
93
- input: PromptResultUsageCounts;
94
- /**
95
- * Number of tokens used in the output aka. `completion_tokens`
96
- */
97
- output: PromptResultUsageCounts;
98
- };
99
- /**
100
- * Record of all possible measurable units
101
- */
102
- export type PromptResultUsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
103
- /**
104
- * Number which can be uncertain
105
- *
106
- * Note: If the value is completelly unknown, the value 0 and isUncertain is true
107
- * Note: Not using NaN or null because it looses the value which is better to be uncertain then not to be at all
108
- */
109
- export type UncertainNumber = {
110
- /**
111
- * The numeric value
112
- */
113
- readonly value: number_usd & (number_positive | 0);
114
- /**
115
- * Is the value uncertain
116
- */
117
- readonly isUncertain?: true;
75
+ readonly rawResponse: TODO_object;
118
76
  };
119
77
  /**
120
78
  * TODO: [🧠] Maybe timing more accurate then seconds?
121
79
  * TODO: [🧠] Should here be link to the prompt?
122
- * TODO: [🧠] Maybe type raw properly - not onject but OpenAI.result.whatever
80
+ * TODO: [🧠] Maybe type `rawResponse` properly - not onject but OpenAI.result.whatever
123
81
  * TODO: [🧠] Maybe remove redundant raw.choices.text
124
82
  * TODO: Log raw even if prompt failed - log the raw error
125
- * TODO: [🏳] Add `PromptTranslationResult`
83
+ * TODO: [🏳] Add `TranslationPromptResult`
126
84
  */
@@ -0,0 +1,26 @@
1
+ import type { KebabCase } from 'type-fest';
2
+ import type { ExpectationUnit } from '../types/PipelineJson/Expectations';
3
+ import type { UncertainNumber } from './UncertainNumber';
4
+ /**
5
+ * Usage statistics for one or many prompt results
6
+ */
7
+ export type PromptResultUsage = {
8
+ /**
9
+ * Cost of the execution in USD
10
+ *
11
+ * Note: If the cost is unknown, the value 0 and isUncertain is true
12
+ */
13
+ price: UncertainNumber;
14
+ /**
15
+ * Number of whatever used in the input aka. `prompt_tokens`
16
+ */
17
+ input: PromptResultUsageCounts;
18
+ /**
19
+ * Number of tokens used in the output aka. `completion_tokens`
20
+ */
21
+ output: PromptResultUsageCounts;
22
+ };
23
+ /**
24
+ * Record of all possible measurable units
25
+ */
26
+ export type PromptResultUsageCounts = Record<`${KebabCase<'TOKENS' | ExpectationUnit>}Count`, UncertainNumber>;
@@ -0,0 +1,18 @@
1
+ import type { number_positive } from '../types/typeAliases';
2
+ import type { number_usd } from '../types/typeAliases';
3
+ /**
4
+ * Number which can be uncertain
5
+ *
6
+ * Note: If the value is completelly unknown, the value 0 and isUncertain is true
7
+ * Note: Not using NaN or null because it looses the value which is better to be uncertain then not to be at all
8
+ */
9
+ export type UncertainNumber = {
10
+ /**
11
+ * The numeric value
12
+ */
13
+ readonly value: number_usd & (number_positive | 0);
14
+ /**
15
+ * Is the value uncertain
16
+ */
17
+ readonly isUncertain?: true;
18
+ };
@@ -1,7 +1,63 @@
1
- import type { PromptResultUsage } from '../PromptResult';
1
+ import type { PromptResultUsage } from '../PromptResultUsage';
2
+ /**
3
+ * @@@
4
+ *
5
+ * TODO: [🔼] Export with addUsage
6
+ */
7
+ export declare const ZERO_USAGE: import("type-fest/source/readonly-deep").ReadonlyObjectDeep<{
8
+ readonly price: {
9
+ readonly value: 0;
10
+ };
11
+ readonly input: {
12
+ readonly tokensCount: {
13
+ readonly value: 0;
14
+ };
15
+ readonly charactersCount: {
16
+ readonly value: 0;
17
+ };
18
+ readonly wordsCount: {
19
+ readonly value: 0;
20
+ };
21
+ readonly sentencesCount: {
22
+ readonly value: 0;
23
+ };
24
+ readonly linesCount: {
25
+ readonly value: 0;
26
+ };
27
+ readonly paragraphsCount: {
28
+ readonly value: 0;
29
+ };
30
+ readonly pagesCount: {
31
+ readonly value: 0;
32
+ };
33
+ };
34
+ readonly output: {
35
+ readonly tokensCount: {
36
+ readonly value: 0;
37
+ };
38
+ readonly charactersCount: {
39
+ readonly value: 0;
40
+ };
41
+ readonly wordsCount: {
42
+ readonly value: 0;
43
+ };
44
+ readonly sentencesCount: {
45
+ readonly value: 0;
46
+ };
47
+ readonly linesCount: {
48
+ readonly value: 0;
49
+ };
50
+ readonly paragraphsCount: {
51
+ readonly value: 0;
52
+ };
53
+ readonly pagesCount: {
54
+ readonly value: 0;
55
+ };
56
+ };
57
+ }>;
2
58
  /**
3
59
  * Function `addUsage` will add multiple usages into one
4
60
  *
5
- * Note: If you provide 0 values, it returns void usage
61
+ * Note: If you provide 0 values, it returns ZERO_USAGE
6
62
  */
7
63
  export declare function addUsage(...usageItems: Array<PromptResultUsage>): PromptResultUsage;
@@ -1,4 +1,4 @@
1
- import type { PromptResultUsageCounts } from '../PromptResult';
1
+ import type { PromptResultUsageCounts } from '../PromptResultUsage';
2
2
  /**
3
3
  * Helper of usage compute
4
4
  *
@@ -1,4 +1,4 @@
1
- import type { UncertainNumber } from '../PromptResult';
1
+ import type { UncertainNumber } from '../UncertainNumber';
2
2
  /**
3
3
  * Make UncertainNumber
4
4
  *
@@ -1,5 +1,5 @@
1
- import type { PromptResultUsage } from '../PromptResult';
2
- import type { UncertainNumber } from '../PromptResult';
1
+ import type { PromptResultUsage } from '../PromptResultUsage';
2
+ import type { UncertainNumber } from '../UncertainNumber';
3
3
  /**
4
4
  * Function usageToWorktime will take usage and estimate saved worktime in hours of reading / writing
5
5
  *
@@ -1,8 +1,16 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ export type CreateLlmToolsFromEnvOptions = {
3
+ /**
4
+ * This will will be passed to the created `LlmExecutionTools`
5
+ *
6
+ * @default false
7
+ */
8
+ isVerbose?: boolean;
9
+ };
2
10
  /**
3
11
  * @@@
4
12
  *
5
- * Note: This function is not cached, every call creates new instance of LlmExecutionTools
13
+ * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
6
14
  *
7
15
  * It looks for environment variables:
8
16
  * - `process.env.OPENAI_API_KEY`
@@ -10,7 +18,7 @@ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
10
18
  *
11
19
  * @returns @@@
12
20
  */
13
- export declare function createLlmToolsFromEnv(): LlmExecutionTools;
21
+ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
14
22
  /**
15
23
  * TODO: [🔼] !!! Export via `@promptbook/node`
16
24
  * TODO: @@@ write discussion about this - wizzard
@@ -1,10 +1,11 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
2
3
  /**
3
4
  * Returns LLM tools for testing purposes
4
5
  *
5
6
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
6
7
  */
7
- export declare function getLlmToolsForTestingAndScriptsAndPlayground(): LlmExecutionTools;
8
+ export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
8
9
  /**
9
10
  * Note: [⚪] This should never be in any released package
10
11
  */
@@ -1,5 +1,5 @@
1
1
  import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
- import type { PromptResultUsage } from '../../../../execution/PromptResult';
2
+ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
3
  /**
4
4
  * LLM tools with option to get total cost of the execution
5
5
  */
@@ -0,0 +1,32 @@
1
+ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
2
+ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
3
+ import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
4
+ import type { TODO_any } from '../../../../utils/organization/TODO_any';
5
+ import type { LlmExecutionToolsWithTotalCost } from './LlmExecutionToolsWithTotalCost';
6
+ /**
7
+ * Options for `limitTotalCost`
8
+ */
9
+ type LimitTotalCostOptions = {
10
+ /**
11
+ * @@@
12
+ *
13
+ * @default ZERO_USAGE
14
+ */
15
+ maxTotalCost: PromptResultUsage;
16
+ /**
17
+ * @@@
18
+ *
19
+ * @default MemoryStorage
20
+ */
21
+ storage: PromptbookStorage<TODO_any>;
22
+ };
23
+ /**
24
+ * @@@
25
+ */
26
+ export declare function limitTotalCost(llmTools: LlmExecutionTools, options?: Partial<LimitTotalCostOptions>): LlmExecutionToolsWithTotalCost;
27
+ export {};
28
+ /**
29
+ * TODO: [🔼] !!! Export via `@promptbookcore/`
30
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
31
+ * TODO: [🧠] Is there some meaningfull way how to test this util
32
+ */
@@ -1,6 +1,6 @@
1
1
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { PromptChatResult } from '../../execution/PromptResult';
3
+ import type { ChatPromptResult } from '../../execution/PromptResult';
4
4
  import type { Prompt } from '../../types/Prompt';
5
5
  import type { string_markdown } from '../../types/typeAliases';
6
6
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -26,7 +26,7 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
26
26
  /**
27
27
  * Calls Anthropic Claude API to use a chat model.
28
28
  */
29
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
29
+ callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
30
30
  /**
31
31
  * Get the model that should be used as default
32
32
  */
@@ -1,7 +1,7 @@
1
1
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { PromptChatResult } from '../../execution/PromptResult';
4
- import type { PromptCompletionResult } from '../../execution/PromptResult';
3
+ import type { ChatPromptResult } from '../../execution/PromptResult';
4
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
5
5
  import type { Prompt } from '../../types/Prompt';
6
6
  import type { string_markdown } from '../../types/typeAliases';
7
7
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -27,11 +27,11 @@ export declare class AzureOpenAiExecutionTools implements LlmExecutionTools {
27
27
  /**
28
28
  * Calls OpenAI API to use a chat model.
29
29
  */
30
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
30
+ callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
31
31
  /**
32
32
  * Calls Azure OpenAI API to use a complete model.
33
33
  */
34
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
34
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
35
35
  /**
36
36
  * Changes Azure error (which is not propper Error but object) to propper Error
37
37
  */
@@ -1,8 +1,8 @@
1
1
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
2
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { PromptChatResult } from '../../execution/PromptResult';
5
- import type { PromptCompletionResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult } from '../../execution/PromptResult';
5
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
6
6
  import type { Prompt } from '../../types/Prompt';
7
7
  import type { string_markdown } from '../../types/typeAliases';
8
8
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -18,11 +18,11 @@ export declare class MockedEchoLlmExecutionTools implements LlmExecutionTools {
18
18
  /**
19
19
  * Mocks chat model
20
20
  */
21
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptChatResult>;
21
+ callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<ChatPromptResult>;
22
22
  /**
23
23
  * Mocks completion model
24
24
  */
25
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
25
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
26
26
  /**
27
27
  * List all available mocked-models that can be used
28
28
  */
@@ -1,9 +1,9 @@
1
1
  import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutionToolsOptions';
2
2
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { PromptChatResult } from '../../execution/PromptResult';
5
- import type { PromptCompletionResult } from '../../execution/PromptResult';
6
- import type { PromptEmbeddingResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult } from '../../execution/PromptResult';
5
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
6
+ import type { EmbeddingPromptResult } from '../../execution/PromptResult';
7
7
  import type { Prompt } from '../../types/Prompt';
8
8
  import type { string_markdown } from '../../types/typeAliases';
9
9
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -19,15 +19,15 @@ export declare class MockedFackedLlmExecutionTools implements LlmExecutionTools
19
19
  /**
20
20
  * Fakes chat model
21
21
  */
22
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptChatResult & PromptCompletionResult>;
22
+ callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<ChatPromptResult & CompletionPromptResult>;
23
23
  /**
24
24
  * Fakes completion model
25
25
  */
26
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptCompletionResult>;
26
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<CompletionPromptResult>;
27
27
  /**
28
28
  * Fakes embedding model
29
29
  */
30
- callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<PromptEmbeddingResult>;
30
+ callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectations' | 'postprocessing'>): Promise<EmbeddingPromptResult>;
31
31
  /**
32
32
  * List all available fake-models that can be used
33
33
  */
@@ -1,9 +1,11 @@
1
1
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { PromptChatResult } from '../../execution/PromptResult';
4
- import type { PromptCompletionResult } from '../../execution/PromptResult';
5
- import type { PromptEmbeddingResult } from '../../execution/PromptResult';
6
- import type { Prompt } from '../../types/Prompt';
3
+ import type { ChatPromptResult } from '../../execution/PromptResult';
4
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
5
+ import type { EmbeddingPromptResult } from '../../execution/PromptResult';
6
+ import type { ChatPrompt } from '../../types/Prompt';
7
+ import type { CompletionPrompt } from '../../types/Prompt';
8
+ import type { EmbeddingPrompt } from '../../types/Prompt';
7
9
  import type { string_markdown } from '../../types/typeAliases';
8
10
  import type { string_markdown_text } from '../../types/typeAliases';
9
11
  import type { string_title } from '../../types/typeAliases';
@@ -26,15 +28,15 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
26
28
  /**
27
29
  * Calls the best available chat model
28
30
  */
29
- callChatModel(prompt: Prompt): Promise<PromptChatResult>;
31
+ callChatModel(prompt: ChatPrompt): Promise<ChatPromptResult>;
30
32
  /**
31
33
  * Calls the best available completion model
32
34
  */
33
- callCompletionModel(prompt: Prompt): Promise<PromptCompletionResult>;
35
+ callCompletionModel(prompt: CompletionPrompt): Promise<CompletionPromptResult>;
34
36
  /**
35
37
  * Calls the best available embedding model
36
38
  */
37
- callEmbeddingModel(prompt: Prompt): Promise<PromptEmbeddingResult>;
39
+ callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
38
40
  /**
39
41
  * Calls the best available model
40
42
  */
@@ -1,8 +1,8 @@
1
1
  import type { AvailableModel } from '../../execution/LlmExecutionTools';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { PromptChatResult } from '../../execution/PromptResult';
4
- import type { PromptCompletionResult } from '../../execution/PromptResult';
5
- import type { PromptEmbeddingResult } from '../../execution/PromptResult';
3
+ import type { ChatPromptResult } from '../../execution/PromptResult';
4
+ import type { CompletionPromptResult } from '../../execution/PromptResult';
5
+ import type { EmbeddingPromptResult } from '../../execution/PromptResult';
6
6
  import type { Prompt } from '../../types/Prompt';
7
7
  import type { string_markdown } from '../../types/typeAliases';
8
8
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -28,15 +28,15 @@ export declare class OpenAiExecutionTools implements LlmExecutionTools {
28
28
  /**
29
29
  * Calls OpenAI API to use a chat model.
30
30
  */
31
- callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<PromptChatResult>;
31
+ callChatModel(prompt: Pick<Prompt, 'content' | 'modelRequirements' | 'expectFormat'>): Promise<ChatPromptResult>;
32
32
  /**
33
33
  * Calls OpenAI API to use a complete model.
34
34
  */
35
- callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
35
+ callCompletionModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<CompletionPromptResult>;
36
36
  /**
37
37
  * Calls OpenAI API to use a embedding model
38
38
  */
39
- callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptEmbeddingResult>;
39
+ callEmbeddingModel(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<EmbeddingPromptResult>;
40
40
  /**
41
41
  * Get the model that should be used as default
42
42
  */
@@ -1,5 +1,5 @@
1
1
  import type OpenAI from 'openai';
2
- import type { PromptResultUsage } from '../../execution/PromptResult';
2
+ import type { PromptResultUsage } from '../../execution/PromptResultUsage';
3
3
  import type { Prompt } from '../../types/Prompt';
4
4
  /**
5
5
  * Computes the usage of the OpenAI API based on the response from OpenAI