@promptbook/markdown-utils 0.105.0-1 → 0.105.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -77
- package/esm/index.es.js +80 -63
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +10 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +11 -1
- package/esm/typings/src/book-2.0/agent-source/communication-samples.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.blocks.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.import.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSource.import.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.blocks.test.d.ts +1 -0
- package/esm/typings/src/commitments/USE_TIME/USE_TIME.d.ts +40 -0
- package/esm/typings/src/commitments/USE_TIME/USE_TIME.test.d.ts +1 -0
- package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +8 -0
- package/esm/typings/src/commitments/_base/CommitmentDefinition.d.ts +8 -0
- package/esm/typings/src/commitments/index.d.ts +11 -2
- package/esm/typings/src/config.d.ts +1 -0
- package/esm/typings/src/import-plugins/$fileImportPlugins.d.ts +7 -0
- package/esm/typings/src/import-plugins/AgentFileImportPlugin.d.ts +7 -0
- package/esm/typings/src/import-plugins/FileImportPlugin.d.ts +24 -0
- package/esm/typings/src/import-plugins/JsonFileImportPlugin.d.ts +7 -0
- package/esm/typings/src/import-plugins/TextFileImportPlugin.d.ts +7 -0
- package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +2 -2
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +9 -2
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +3 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +10 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +6 -1
- package/esm/typings/src/types/ModelRequirements.d.ts +6 -12
- package/esm/typings/src/utils/execCommand/$execCommandNormalizeOptions.d.ts +2 -3
- package/esm/typings/src/utils/execCommand/ExecCommandOptions.d.ts +7 -1
- package/esm/typings/src/utils/organization/keepImported.d.ts +9 -0
- package/esm/typings/src/utils/organization/keepTypeImported.d.ts +0 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +80 -63
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
|
@@ -24,7 +24,7 @@
|
|
|
24
24
|
* @generated
|
|
25
25
|
* @see https://github.com/webgptorg/promptbook
|
|
26
26
|
*/
|
|
27
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.105.0-1';
|
|
27
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.105.0-4';
|
|
28
28
|
/**
|
|
29
29
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
30
30
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -1118,6 +1118,7 @@
|
|
|
1118
1118
|
SEPARATOR: Color.fromHex('#cccccc'),
|
|
1119
1119
|
COMMITMENT: Color.fromHex('#DA0F78'),
|
|
1120
1120
|
PARAMETER: Color.fromHex('#8e44ad'),
|
|
1121
|
+
CODE_BLOCK: Color.fromHex('#7700ffff'),
|
|
1121
1122
|
});
|
|
1122
1123
|
// <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
|
|
1123
1124
|
/**
|
|
@@ -3494,74 +3495,90 @@
|
|
|
3494
3495
|
* in real-time through an observable.
|
|
3495
3496
|
*
|
|
3496
3497
|
* @param llmTools - The LLM tools to be intercepted and tracked
|
|
3497
|
-
* @returns
|
|
3498
|
+
* @returns Full proxy of the tools with added usage tracking capabilities
|
|
3498
3499
|
* @public exported from `@promptbook/core`
|
|
3499
3500
|
*/
|
|
3500
3501
|
function countUsage(llmTools) {
|
|
3501
3502
|
let totalUsage = ZERO_USAGE;
|
|
3502
3503
|
const spending = new rxjs.Subject();
|
|
3503
|
-
|
|
3504
|
-
|
|
3505
|
-
|
|
3506
|
-
//
|
|
3507
|
-
|
|
3508
|
-
|
|
3509
|
-
|
|
3510
|
-
|
|
3511
|
-
|
|
3512
|
-
//
|
|
3513
|
-
|
|
3514
|
-
|
|
3515
|
-
|
|
3516
|
-
|
|
3517
|
-
|
|
3518
|
-
|
|
3519
|
-
|
|
3520
|
-
|
|
3521
|
-
|
|
3522
|
-
|
|
3523
|
-
|
|
3524
|
-
//
|
|
3525
|
-
|
|
3504
|
+
// Create a Proxy to intercept all property access and ensure full proxying of all properties
|
|
3505
|
+
const proxyTools = new Proxy(llmTools, {
|
|
3506
|
+
get(target, prop, receiver) {
|
|
3507
|
+
// Handle title property
|
|
3508
|
+
if (prop === 'title') {
|
|
3509
|
+
return `${target.title} (+usage)`;
|
|
3510
|
+
// <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
|
|
3511
|
+
// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
|
|
3512
|
+
}
|
|
3513
|
+
// Handle description property
|
|
3514
|
+
if (prop === 'description') {
|
|
3515
|
+
return `${target.description} (+usage)`;
|
|
3516
|
+
// <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
|
|
3517
|
+
// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
|
|
3518
|
+
}
|
|
3519
|
+
// Handle spending method (new method added by this wrapper)
|
|
3520
|
+
if (prop === 'spending') {
|
|
3521
|
+
return () => {
|
|
3522
|
+
return spending.asObservable();
|
|
3523
|
+
};
|
|
3524
|
+
}
|
|
3525
|
+
// Handle getTotalUsage method (new method added by this wrapper)
|
|
3526
|
+
if (prop === 'getTotalUsage') {
|
|
3527
|
+
// <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
|
|
3528
|
+
return () => {
|
|
3529
|
+
return totalUsage;
|
|
3530
|
+
};
|
|
3531
|
+
}
|
|
3532
|
+
// Handle callChatModel method with usage counting
|
|
3533
|
+
if (prop === 'callChatModel' && target.callChatModel !== undefined) {
|
|
3534
|
+
return async (prompt) => {
|
|
3535
|
+
// console.info('[🚕] callChatModel through countTotalUsage');
|
|
3536
|
+
const promptResult = await target.callChatModel(prompt);
|
|
3537
|
+
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3538
|
+
spending.next(promptResult.usage);
|
|
3539
|
+
return promptResult;
|
|
3540
|
+
};
|
|
3541
|
+
}
|
|
3542
|
+
// Handle callCompletionModel method with usage counting
|
|
3543
|
+
if (prop === 'callCompletionModel' && target.callCompletionModel !== undefined) {
|
|
3544
|
+
return async (prompt) => {
|
|
3545
|
+
// console.info('[🚕] callCompletionModel through countTotalUsage');
|
|
3546
|
+
const promptResult = await target.callCompletionModel(prompt);
|
|
3547
|
+
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3548
|
+
spending.next(promptResult.usage);
|
|
3549
|
+
return promptResult;
|
|
3550
|
+
};
|
|
3551
|
+
}
|
|
3552
|
+
// Handle callEmbeddingModel method with usage counting
|
|
3553
|
+
if (prop === 'callEmbeddingModel' && target.callEmbeddingModel !== undefined) {
|
|
3554
|
+
return async (prompt) => {
|
|
3555
|
+
// console.info('[🚕] callEmbeddingModel through countTotalUsage');
|
|
3556
|
+
const promptResult = await target.callEmbeddingModel(prompt);
|
|
3557
|
+
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3558
|
+
spending.next(promptResult.usage);
|
|
3559
|
+
return promptResult;
|
|
3560
|
+
};
|
|
3561
|
+
}
|
|
3562
|
+
// Handle callImageGenerationModel method with usage counting
|
|
3563
|
+
if (prop === 'callImageGenerationModel' && target.callImageGenerationModel !== undefined) {
|
|
3564
|
+
return async (prompt) => {
|
|
3565
|
+
// console.info('[🚕] callImageGenerationModel through countTotalUsage');
|
|
3566
|
+
const promptResult = await target.callImageGenerationModel(prompt);
|
|
3567
|
+
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3568
|
+
spending.next(promptResult.usage);
|
|
3569
|
+
return promptResult;
|
|
3570
|
+
};
|
|
3571
|
+
}
|
|
3572
|
+
// <- Note: [🤖]
|
|
3573
|
+
// For all other properties and methods, delegate to the original target
|
|
3574
|
+
const value = Reflect.get(target, prop, receiver);
|
|
3575
|
+
// If it's a function, bind it to the target to preserve context
|
|
3576
|
+
if (typeof value === 'function') {
|
|
3577
|
+
return value.bind(target);
|
|
3578
|
+
}
|
|
3579
|
+
return value;
|
|
3526
3580
|
},
|
|
3527
|
-
};
|
|
3528
|
-
if (llmTools.callChatModel !== undefined) {
|
|
3529
|
-
proxyTools.callChatModel = async (prompt) => {
|
|
3530
|
-
// console.info('[🚕] callChatModel through countTotalUsage');
|
|
3531
|
-
const promptResult = await llmTools.callChatModel(prompt);
|
|
3532
|
-
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3533
|
-
spending.next(promptResult.usage);
|
|
3534
|
-
return promptResult;
|
|
3535
|
-
};
|
|
3536
|
-
}
|
|
3537
|
-
if (llmTools.callCompletionModel !== undefined) {
|
|
3538
|
-
proxyTools.callCompletionModel = async (prompt) => {
|
|
3539
|
-
// console.info('[🚕] callCompletionModel through countTotalUsage');
|
|
3540
|
-
const promptResult = await llmTools.callCompletionModel(prompt);
|
|
3541
|
-
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3542
|
-
spending.next(promptResult.usage);
|
|
3543
|
-
return promptResult;
|
|
3544
|
-
};
|
|
3545
|
-
}
|
|
3546
|
-
if (llmTools.callEmbeddingModel !== undefined) {
|
|
3547
|
-
proxyTools.callEmbeddingModel = async (prompt) => {
|
|
3548
|
-
// console.info('[🚕] callEmbeddingModel through countTotalUsage');
|
|
3549
|
-
const promptResult = await llmTools.callEmbeddingModel(prompt);
|
|
3550
|
-
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3551
|
-
spending.next(promptResult.usage);
|
|
3552
|
-
return promptResult;
|
|
3553
|
-
};
|
|
3554
|
-
}
|
|
3555
|
-
if (llmTools.callImageGenerationModel !== undefined) {
|
|
3556
|
-
proxyTools.callImageGenerationModel = async (prompt) => {
|
|
3557
|
-
// console.info('[🚕] callImageGenerationModel through countTotalUsage');
|
|
3558
|
-
const promptResult = await llmTools.callImageGenerationModel(prompt);
|
|
3559
|
-
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3560
|
-
spending.next(promptResult.usage);
|
|
3561
|
-
return promptResult;
|
|
3562
|
-
};
|
|
3563
|
-
}
|
|
3564
|
-
// <- Note: [🤖]
|
|
3581
|
+
});
|
|
3565
3582
|
return proxyTools;
|
|
3566
3583
|
}
|
|
3567
3584
|
/**
|