@promptbook/markdown-utils 0.61.0 → 0.62.0-0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/typings/src/_packages/core.index.d.ts +2 -1
- package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
- package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
- package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
- package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
- package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
- package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
- package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
- package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
- package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/package.json +3 -3
- package/umd/typings/src/_packages/core.index.d.ts +2 -1
- package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
- package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
- package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
- package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
- package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
- package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
- package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
- package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
- package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
- package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
- package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
- package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
- package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
- package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
@@ -26,6 +26,7 @@ import { embeddingVectorToString } from '../execution/embeddingVectorToString';
 import { addUsage } from '../execution/utils/addUsage';
 import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
 import { usageToWorktime } from '../execution/utils/usageToWorktime';
+import { usageToHuman } from '../execution/utils/usageToHuman';
 import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackInterfaceTools';
 import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
 import { SimplePromptInterfaceTools } from '../knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools';

@@ -39,7 +40,7 @@ import { executionReportJsonToString } from '../types/execution-report/execution
 import { PROMPTBOOK_VERSION } from '../version';
 export { PROMPTBOOK_VERSION };
 export { BlockTypes, RESERVED_PARAMETER_NAMES };
-export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
+export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, usageToHuman, };
 export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };
 export { SimplePromptInterfaceTools };
 export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, stringifyPipelineJson, validatePipeline, };

@@ -10,5 +10,6 @@ export declare function pipelineJsonToString(pipelineJson: PipelineJson): Pipeli
 /**
 * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
 * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
-* TODO:
+* TODO: [🏛] Maybe make some markdown builder
+* TODO: [🏛] Escape all
 */

@@ -58,6 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
 export {};
 /**
 * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+* TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
 * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
 * TODO: [♈] Probbably move expectations from templates to parameters
 * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user

@@ -0,0 +1,15 @@
+import type { string_markdown } from '../../types/typeAliases';
+import type { PromptResultUsage } from '../PromptResultUsage';
+/**
+* Function `usageToHuman` will take usage and convert it to human readable report
+*/
+export declare function usageToHuman(usage: PromptResultUsage): string_markdown;
+/**
+* TODO: Use "$1" not "1 USD"
+* TODO: Use markdown formatting like "Cost approximately **$1**"
+* TODO: Report in minutes, seconds, days NOT 0.1 hours
+* TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
+* TODO: When negligible usage, report "Negligible" or just don't report it
+* TODO: [🧠] Maybe use "~" instead of "approximately"
+* TODO: [🏛] Maybe make some markdown builder
+*/

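The new `usageToHuman` utility converts a `PromptResultUsage` record into a human-readable markdown report, and it is re-exported from the core entry point (see the `core.index.d.ts` hunk above). A minimal sketch of how it might be used; how the `usage` value is obtained is an assumption, not part of this diff:

```ts
import { usageToHuman } from '@promptbook/core';

// Assumed: a `PromptResultUsage` obtained elsewhere, e.g. from an execution report
// or from the `totalUsage` of usage-counting LLM tools (see `countTotalUsage` below).
declare const usage: Parameters<typeof usageToHuman>[0];

const report = usageToHuman(usage); // string_markdown with a cost/worktime summary
console.info(report);
```
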
@@ -0,0 +1 @@
+export {};

@@ -27,4 +27,5 @@ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOpt
 * TODO: [🧠] Is there some meaningfull way how to test this util
 * TODO: [🧠] Maybe pass env as argument
 * Note: [🟢] This code should never be published outside of `@promptbook/node`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -1,10 +1,11 @@
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 /**
 * Returns LLM tools for CLI
 *
 * @private within the repository - for CLI utils
 */
-export declare function getLlmToolsForCli():
+export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
 /**
 * Note: [🟡] This code should never be published outside of `@promptbook/cli`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

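`getLlmToolsForCli` now returns `LlmExecutionToolsWithTotalUsage`, so CLI code can report what a run cost. A rough sketch of the intent; the helper is `@private` within the repository, so the relative import paths here are illustrative only:

```ts
// Only usable inside the promptbook repository itself (the helper is @private).
import { getLlmToolsForCli } from '../llm-providers/_common/getLlmToolsForCli';
import { usageToHuman } from '../execution/utils/usageToHuman';

const llmTools = getLlmToolsForCli(); // LlmExecutionToolsWithTotalUsage

// ... run CLI commands that execute prompts through `llmTools` ...

// `totalUsage` is the accumulated PromptResultUsage of everything executed so far.
console.info(usageToHuman(llmTools.totalUsage));
```
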
package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts CHANGED
@@ -1,11 +1,12 @@
-import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
+import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 /**
 * Returns LLM tools for testing purposes
 *
 * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
 */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions):
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
 /**
 * Note: [⚪] This should never be in any released package
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -3,14 +3,17 @@ import type { CacheLlmToolsOptions } from './CacheLlmToolsOptions';
 /**
 * Intercepts LLM tools and counts total usage of the tools
 *
-*
+* Note: It can take extended `LlmExecutionTools` and cache the
+*
+* @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
 * @returns LLM tools with same functionality with added total cost counting
 */
-export declare function cacheLlmTools(llmTools:
+export declare function cacheLlmTools<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, options?: Partial<CacheLlmToolsOptions>): TLlmTools;
 /**
 * TODO: [🔼] !!! Export via `@promptbook/core`
-* TODO: @@@ write discussion about this and storages
-* write how to combine multiple interceptors
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+* @@@ write discussion about this and storages
+* @@@ write how to combine multiple interceptors
 */

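The notable change here is that `cacheLlmTools` became generic over `TLlmTools extends LlmExecutionTools`, so wrapping tools that carry extra members (such as the `totalUsage` added by the usage-counting interceptor) no longer erases those members from the result type. A sketch of what this enables; the interceptors are still internal (see the "Export via `@promptbook/core`" TODO) and the import paths are illustrative:

```ts
import { cacheLlmTools } from './cache/cacheLlmTools';
import { countTotalUsage } from '../count-total-usage/countTotalUsage';
import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';

declare const baseTools: LlmExecutionTools; // any concrete provider instance

const countedTools = countTotalUsage(baseTools); // LlmExecutionToolsWithTotalUsage
const cachedTools = cacheLlmTools(countedTools); // type is preserved by the new generic

cachedTools.totalUsage; // <- still type-checks, because TLlmTools is inferred as
                        //    LlmExecutionToolsWithTotalUsage, not plain LlmExecutionTools
```
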
@@ -1,11 +1,14 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
 import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 /**
-* LLM tools with option to get total
+* LLM tools with option to get total usage of the execution
 */
-export type
+export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
 /**
 * Total cost of the execution
 */
 totalUsage: PromptResultUsage;
 };
+/**
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+*/

@@ -1,14 +1,17 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
 /**
 * Intercepts LLM tools and counts total usage of the tools
 *
 * @param llmTools LLM tools to be intercepted with usage counting
 * @returns LLM tools with same functionality with added total cost counting
 */
-export declare function countTotalUsage(llmTools: LlmExecutionTools):
+export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
 /**
 * TODO: [🔼] !!! Export via `@promptbookcore/`
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+* > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

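`countTotalUsage` (the rename of the former `countTotalCost` utility) wraps any `LlmExecutionTools` and exposes the accumulated usage as `totalUsage`, which pairs naturally with the new `usageToHuman`. A minimal sketch; the utility is not yet exported from a public entry point, so the import paths are illustrative:

```ts
import { countTotalUsage } from './count-total-usage/countTotalUsage';
import { usageToHuman } from '../../../../execution/utils/usageToHuman';
import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';

declare const llmTools: LlmExecutionTools; // e.g. tools created from env variables

const llmToolsWithUsage = countTotalUsage(llmTools); // same API plus `totalUsage`

// ... execute prompts through `llmToolsWithUsage` instead of `llmTools` ...

console.info(usageToHuman(llmToolsWithUsage.totalUsage)); // human readable usage report
```
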
@@ -2,17 +2,17 @@ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools'
 import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
 import type { TODO_any } from '../../../../utils/organization/TODO_any';
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
 /**
-* Options for `
+* Options for `limitTotalUsage`
 */
-type
+type LimitTotalUsageOptions = {
 /**
 * @@@
 *
 * @default ZERO_USAGE
 */
-
+maxTotalUsage: PromptResultUsage;
 /**
 * @@@
 *

@@ -23,10 +23,13 @@ type LimitTotalCostOptions = {
 /**
 * @@@
 */
-export declare function
+export declare function limitTotalUsage(llmTools: LlmExecutionTools, options?: Partial<LimitTotalUsageOptions>): LlmExecutionToolsWithTotalUsage;
 export {};
 /**
 * TODO: [🔼] !!! Export via `@promptbookcore/`
+* TODO: Maybe internally use `countTotalUsage`
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

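`limitTotalUsage` is the companion interceptor: it returns `LlmExecutionToolsWithTotalUsage` and, per its renamed `LimitTotalUsageOptions`, takes a `maxTotalUsage` budget that defaults to `ZERO_USAGE`. A sketch of the intended use; the utility is internal, the import paths are illustrative, and how the budget value is constructed is not shown in this diff:

```ts
import { limitTotalUsage } from './count-total-usage/limitTotalUsage';
import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';

declare const llmTools: LlmExecutionTools;
declare const budget: PromptResultUsage; // assumed: the maximum usage allowed for this run

// Same API as the wrapped tools, plus `totalUsage`; calls beyond the budget are
// presumably rejected by the interceptor (behaviour is not part of this typings diff).
const limitedTools = limitTotalUsage(llmTools, { maxTotalUsage: budget });
```
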
@@ -14,3 +14,6 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
 */
 export declare function joinLlmExecutionTools(...llmExecutionTools: Array<LlmExecutionTools>): MultipleLlmExecutionTools;
+/**
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+*/

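Only a TODO comment is added here, but since `joinLlmExecutionTools` keeps coming up in the "construction of llmTools" notes, a short reminder of its shape; the two providers are assumed placeholders and the import path is illustrative:

```ts
import { joinLlmExecutionTools } from './joinLlmExecutionTools';
import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';

declare const openAiTools: LlmExecutionTools;    // assumed provider instance
declare const anthropicTools: LlmExecutionTools; // assumed provider instance

// Merges several providers into one MultipleLlmExecutionTools; as the Tip above notes,
// you can usually just pass an array of LlmExecutionTools to `ExecutionTools` instead.
const llmTools = joinLlmExecutionTools(openAiTools, anthropicTools);
```
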
@@ -12,6 +12,5 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
 * TODO: Write tests for `preparePipeline`
 * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
-* TODO: [🎐] !!!!! Use here countTotalUsage
 * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
 */

package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@promptbook/markdown-utils",
-"version": "0.
+"version": "0.62.0-0",
 "description": "Supercharge your use of large language models",
 "private": false,
 "sideEffects": false,

@@ -34,7 +34,7 @@
 },
 "homepage": "https://www.npmjs.com/package/@promptbook/core",
 "dependencies": {
-"spacetrim": "0.11.
+"spacetrim": "0.11.39"
 },
 "funding": [
 {

@@ -47,7 +47,7 @@
 }
 ],
 "peerDependencies": {
-"@promptbook/core": "0.
+"@promptbook/core": "0.62.0-0"
 },
 "main": "./umd/index.umd.js",
 "module": "./esm/index.es.js",

@@ -26,6 +26,7 @@ import { embeddingVectorToString } from '../execution/embeddingVectorToString';
 import { addUsage } from '../execution/utils/addUsage';
 import { checkExpectations, isPassingExpectations } from '../execution/utils/checkExpectations';
 import { usageToWorktime } from '../execution/utils/usageToWorktime';
+import { usageToHuman } from '../execution/utils/usageToHuman';
 import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackInterfaceTools';
 import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
 import { SimplePromptInterfaceTools } from '../knowledge/dialogs/simple-prompt/SimplePromptInterfaceTools';

@@ -39,7 +40,7 @@ import { executionReportJsonToString } from '../types/execution-report/execution
 import { PROMPTBOOK_VERSION } from '../version';
 export { PROMPTBOOK_VERSION };
 export { BlockTypes, RESERVED_PARAMETER_NAMES };
-export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, };
+export { addUsage, assertsExecutionSuccessful, checkExpectations, embeddingVectorToString, executionReportJsonToString, ExecutionReportStringOptions, ExecutionReportStringOptionsDefaults, isPassingExpectations, prepareKnowledgeFromMarkdown, prettifyPipelineString, usageToWorktime, usageToHuman, };
 export { collectionToJson, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createSubcollection, };
 export { SimplePromptInterfaceTools };
 export { pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, stringifyPipelineJson, validatePipeline, };

@@ -10,5 +10,6 @@ export declare function pipelineJsonToString(pipelineJson: PipelineJson): Pipeli
 /**
 * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
 * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
-* TODO:
+* TODO: [🏛] Maybe make some markdown builder
+* TODO: [🏛] Escape all
 */

@@ -58,6 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
 export {};
 /**
 * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+* TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
 * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
 * TODO: [♈] Probbably move expectations from templates to parameters
 * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user

@@ -0,0 +1,15 @@
+import type { string_markdown } from '../../types/typeAliases';
+import type { PromptResultUsage } from '../PromptResultUsage';
+/**
+* Function `usageToHuman` will take usage and convert it to human readable report
+*/
+export declare function usageToHuman(usage: PromptResultUsage): string_markdown;
+/**
+* TODO: Use "$1" not "1 USD"
+* TODO: Use markdown formatting like "Cost approximately **$1**"
+* TODO: Report in minutes, seconds, days NOT 0.1 hours
+* TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
+* TODO: When negligible usage, report "Negligible" or just don't report it
+* TODO: [🧠] Maybe use "~" instead of "approximately"
+* TODO: [🏛] Maybe make some markdown builder
+*/

@@ -0,0 +1 @@
+export {};

@@ -27,4 +27,5 @@ export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOpt
 * TODO: [🧠] Is there some meaningfull way how to test this util
 * TODO: [🧠] Maybe pass env as argument
 * Note: [🟢] This code should never be published outside of `@promptbook/node`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -1,10 +1,11 @@
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 /**
 * Returns LLM tools for CLI
 *
 * @private within the repository - for CLI utils
 */
-export declare function getLlmToolsForCli():
+export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
 /**
 * Note: [🟡] This code should never be published outside of `@promptbook/cli`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts CHANGED
@@ -1,11 +1,12 @@
-import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
+import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
 /**
 * Returns LLM tools for testing purposes
 *
 * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
 */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions):
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
 /**
 * Note: [⚪] This should never be in any released package
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -3,14 +3,17 @@ import type { CacheLlmToolsOptions } from './CacheLlmToolsOptions';
 /**
 * Intercepts LLM tools and counts total usage of the tools
 *
-*
+* Note: It can take extended `LlmExecutionTools` and cache the
+*
+* @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
 * @returns LLM tools with same functionality with added total cost counting
 */
-export declare function cacheLlmTools(llmTools:
+export declare function cacheLlmTools<TLlmTools extends LlmExecutionTools>(llmTools: TLlmTools, options?: Partial<CacheLlmToolsOptions>): TLlmTools;
 /**
 * TODO: [🔼] !!! Export via `@promptbook/core`
-* TODO: @@@ write discussion about this and storages
-* write how to combine multiple interceptors
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+* @@@ write discussion about this and storages
+* @@@ write how to combine multiple interceptors
 */

@@ -1,11 +1,14 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
 import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 /**
-* LLM tools with option to get total
+* LLM tools with option to get total usage of the execution
 */
-export type
+export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
 /**
 * Total cost of the execution
 */
 totalUsage: PromptResultUsage;
 };
+/**
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+*/

@@ -1,14 +1,17 @@
 import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools';
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
 /**
 * Intercepts LLM tools and counts total usage of the tools
 *
 * @param llmTools LLM tools to be intercepted with usage counting
 * @returns LLM tools with same functionality with added total cost counting
 */
-export declare function countTotalUsage(llmTools: LlmExecutionTools):
+export declare function countTotalUsage(llmTools: LlmExecutionTools): LlmExecutionToolsWithTotalUsage;
 /**
 * TODO: [🔼] !!! Export via `@promptbookcore/`
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+* > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -2,17 +2,17 @@ import type { LlmExecutionTools } from '../../../../execution/LlmExecutionTools'
 import type { PromptResultUsage } from '../../../../execution/PromptResultUsage';
 import type { PromptbookStorage } from '../../../../storage/_common/PromptbookStorage';
 import type { TODO_any } from '../../../../utils/organization/TODO_any';
-import type {
+import type { LlmExecutionToolsWithTotalUsage } from './LlmExecutionToolsWithTotalUsage';
 /**
-* Options for `
+* Options for `limitTotalUsage`
 */
-type
+type LimitTotalUsageOptions = {
 /**
 * @@@
 *
 * @default ZERO_USAGE
 */
-
+maxTotalUsage: PromptResultUsage;
 /**
 * @@@
 *

@@ -23,10 +23,13 @@ type LimitTotalCostOptions = {
 /**
 * @@@
 */
-export declare function
+export declare function limitTotalUsage(llmTools: LlmExecutionTools, options?: Partial<LimitTotalUsageOptions>): LlmExecutionToolsWithTotalUsage;
 export {};
 /**
 * TODO: [🔼] !!! Export via `@promptbookcore/`
+* TODO: Maybe internally use `countTotalUsage`
 * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
 * TODO: [🧠] Is there some meaningfull way how to test this util
+* TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
 */

@@ -14,3 +14,6 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
 * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
 */
 export declare function joinLlmExecutionTools(...llmExecutionTools: Array<LlmExecutionTools>): MultipleLlmExecutionTools;
+/**
+* TODO: [👷♂️] @@@ Manual about construction of llmTools
+*/

@@ -12,6 +12,5 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
 * TODO: Write tests for `preparePipeline`
 * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
-* TODO: [🎐] !!!!! Use here countTotalUsage
 * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
 */