@promptbook/markitdown 0.104.0-13 → 0.104.0-15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +25 -17
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +0 -6
- package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +4 -1
- package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +5 -1
- package/esm/typings/src/book-components/_common/HamburgerMenu/HamburgerMenu.d.ts +4 -0
- package/esm/typings/src/book-components/icons/AboutIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/AttachmentIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/CameraIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/MenuIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/SaveIcon.d.ts +6 -2
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +7 -5
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +7 -3
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +11 -7
- package/esm/typings/src/remote-server/ui/ServerApp.d.ts +5 -1
- package/esm/typings/src/types/typeAliasEmoji.d.ts +2 -2
- package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +4 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +25 -17
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
|
@@ -24,7 +24,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
24
24
|
* @generated
|
|
25
25
|
* @see https://github.com/webgptorg/promptbook
|
|
26
26
|
*/
|
|
27
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-13';
|
|
27
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-15';
|
|
28
28
|
/**
|
|
29
29
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
30
30
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -1564,8 +1564,8 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
|
|
|
1564
1564
|
*/
|
|
1565
1565
|
function removeDiacritics(input) {
|
|
1566
1566
|
/*eslint no-control-regex: "off"*/
|
|
1567
|
-
return input.replace(/[^\u0000-\u007E]/g, (
|
|
1568
|
-
return DIACRITIC_VARIANTS_LETTERS[
|
|
1567
|
+
return input.replace(/[^\u0000-\u007E]/g, (character) => {
|
|
1568
|
+
return DIACRITIC_VARIANTS_LETTERS[character] || character;
|
|
1569
1569
|
});
|
|
1570
1570
|
}
|
|
1571
1571
|
/**
|
|
@@ -3754,7 +3754,7 @@ async function forEachAsync(array, options, callbackfunction) {
|
|
|
3754
3754
|
tasks.push(task);
|
|
3755
3755
|
runningTasks.push(task);
|
|
3756
3756
|
/* not await */ Promise.resolve(task).then(() => {
|
|
3757
|
-
runningTasks = runningTasks.filter((
|
|
3757
|
+
runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
|
|
3758
3758
|
});
|
|
3759
3759
|
if (maxParallelCount < runningTasks.length) {
|
|
3760
3760
|
await Promise.race(runningTasks);
|
|
@@ -3811,10 +3811,14 @@ function addUsage(...usageItems) {
|
|
|
3811
3811
|
}
|
|
3812
3812
|
|
|
3813
3813
|
/**
|
|
3814
|
-
* Intercepts LLM tools and counts total usage of the tools
|
|
3814
|
+
* Intercepts LLM tools and counts total usage of the tools.
|
|
3815
3815
|
*
|
|
3816
|
-
*
|
|
3817
|
-
*
|
|
3816
|
+
* This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
|
|
3817
|
+
* usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
|
|
3818
|
+
* in real-time through an observable.
|
|
3819
|
+
*
|
|
3820
|
+
* @param llmTools - The LLM tools to be intercepted and tracked
|
|
3821
|
+
* @returns An augmented version of the tools that includes usage tracking capabilities
|
|
3818
3822
|
* @public exported from `@promptbook/core`
|
|
3819
3823
|
*/
|
|
3820
3824
|
function countUsage(llmTools) {
|
|
@@ -4079,17 +4083,21 @@ class MultipleLlmExecutionTools {
|
|
|
4079
4083
|
*/
|
|
4080
4084
|
|
|
4081
4085
|
/**
|
|
4082
|
-
* Joins multiple LLM Execution Tools into one
|
|
4086
|
+
* Joins multiple LLM Execution Tools into one.
|
|
4083
4087
|
*
|
|
4084
|
-
*
|
|
4088
|
+
* This function takes a list of `LlmExecutionTools` and returns a single unified
|
|
4089
|
+
* `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
|
|
4085
4090
|
*
|
|
4086
|
-
*
|
|
4087
|
-
*
|
|
4088
|
-
* 2
|
|
4089
|
-
* 3
|
|
4091
|
+
* 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
|
|
4092
|
+
* If the first provider doesn't support the requested model or fails, it tries the next one.
|
|
4093
|
+
* 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
|
|
4094
|
+
* 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
|
|
4090
4095
|
*
|
|
4096
|
+
* @param title - A descriptive title for this collection of joined tools
|
|
4097
|
+
* @param llmExecutionTools - An array of execution tools to be joined
|
|
4098
|
+
* @returns A single unified execution tool wrapper
|
|
4091
4099
|
*
|
|
4092
|
-
* Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools
|
|
4100
|
+
* Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
|
|
4093
4101
|
*
|
|
4094
4102
|
* @public exported from `@promptbook/core`
|
|
4095
4103
|
*/
|
|
@@ -6623,7 +6631,7 @@ async function getKnowledgeForTask(options) {
|
|
|
6623
6631
|
const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
|
|
6624
6632
|
const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
|
|
6625
6633
|
const { index } = knowledgePiece;
|
|
6626
|
-
const knowledgePieceIndex = index.find((
|
|
6634
|
+
const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
|
|
6627
6635
|
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model
|
|
6628
6636
|
if (knowledgePieceIndex === undefined) {
|
|
6629
6637
|
return {
|
|
@@ -7071,7 +7079,7 @@ async function executePipeline(options) {
|
|
|
7071
7079
|
resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
|
|
7072
7080
|
})
|
|
7073
7081
|
.then(() => {
|
|
7074
|
-
resolving = resolving.filter((
|
|
7082
|
+
resolving = resolving.filter((workItem) => workItem !== work);
|
|
7075
7083
|
});
|
|
7076
7084
|
// <- Note: Errors are catched here [3]
|
|
7077
7085
|
// TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
|
|
@@ -7237,7 +7245,7 @@ function createPipelineExecutor(options) {
|
|
|
7237
7245
|
// Calculate and update tldr based on pipeline progress
|
|
7238
7246
|
const cv = newOngoingResult;
|
|
7239
7247
|
// Calculate progress based on parameters resolved vs total parameters
|
|
7240
|
-
const totalParameters = pipeline.parameters.filter((
|
|
7248
|
+
const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
|
|
7241
7249
|
let resolvedParameters = 0;
|
|
7242
7250
|
let currentTaskTitle = '';
|
|
7243
7251
|
// Get the resolved parameters from output parameters
|