@promptbook/markdown-utils 0.104.0-3 → 0.104.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +23 -2
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +6 -2
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +23 -2
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
|
@@ -23,7 +23,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
23
23
|
* @generated
|
|
24
24
|
* @see https://github.com/webgptorg/promptbook
|
|
25
25
|
*/
|
|
26
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
|
|
26
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
|
|
27
27
|
/**
|
|
28
28
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
29
29
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -3545,6 +3545,15 @@ function countUsage(llmTools) {
|
|
|
3545
3545
|
return promptResult;
|
|
3546
3546
|
};
|
|
3547
3547
|
}
|
|
3548
|
+
if (llmTools.callImageGenerationModel !== undefined) {
|
|
3549
|
+
proxyTools.callImageGenerationModel = async (prompt) => {
|
|
3550
|
+
// console.info('[🚕] callImageGenerationModel through countTotalUsage');
|
|
3551
|
+
const promptResult = await llmTools.callImageGenerationModel(prompt);
|
|
3552
|
+
totalUsage = addUsage(totalUsage, promptResult.usage);
|
|
3553
|
+
spending.next(promptResult.usage);
|
|
3554
|
+
return promptResult;
|
|
3555
|
+
};
|
|
3556
|
+
}
|
|
3548
3557
|
// <- Note: [🤖]
|
|
3549
3558
|
return proxyTools;
|
|
3550
3559
|
}
|
|
@@ -3654,6 +3663,12 @@ class MultipleLlmExecutionTools {
|
|
|
3654
3663
|
callEmbeddingModel(prompt) {
|
|
3655
3664
|
return this.callCommonModel(prompt);
|
|
3656
3665
|
}
|
|
3666
|
+
/**
|
|
3667
|
+
* Calls the best available image generation model
|
|
3668
|
+
*/
|
|
3669
|
+
callImageGenerationModel(prompt) {
|
|
3670
|
+
return this.callCommonModel(prompt);
|
|
3671
|
+
}
|
|
3657
3672
|
// <- Note: [🤖]
|
|
3658
3673
|
/**
|
|
3659
3674
|
* Calls the best available model
|
|
@@ -3680,6 +3695,11 @@ class MultipleLlmExecutionTools {
|
|
|
3680
3695
|
continue llm;
|
|
3681
3696
|
}
|
|
3682
3697
|
return await llmExecutionTools.callEmbeddingModel(prompt);
|
|
3698
|
+
case 'IMAGE_GENERATION':
|
|
3699
|
+
if (llmExecutionTools.callImageGenerationModel === undefined) {
|
|
3700
|
+
continue llm;
|
|
3701
|
+
}
|
|
3702
|
+
return await llmExecutionTools.callImageGenerationModel(prompt);
|
|
3683
3703
|
// <- case [🤖]:
|
|
3684
3704
|
default:
|
|
3685
3705
|
throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
|
|
@@ -6107,8 +6127,9 @@ async function executeAttempts(options) {
|
|
|
6107
6127
|
$ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
|
|
6108
6128
|
break variant;
|
|
6109
6129
|
case 'EMBEDDING':
|
|
6130
|
+
case 'IMAGE_GENERATION':
|
|
6110
6131
|
throw new PipelineExecutionError(spaceTrim$2((block) => `
|
|
6111
|
-
|
|
6132
|
+
${modelRequirements.modelVariant} model can not be used in pipeline
|
|
6112
6133
|
|
|
6113
6134
|
This should be caught during parsing
|
|
6114
6135
|
|