@promptbook/pdf 0.104.0-13 → 0.104.0-14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +25 -17
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +0 -6
- package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +4 -1
- package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +5 -1
- package/esm/typings/src/book-components/_common/HamburgerMenu/HamburgerMenu.d.ts +4 -0
- package/esm/typings/src/book-components/icons/AboutIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/AttachmentIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/CameraIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/MenuIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/SaveIcon.d.ts +6 -2
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +7 -5
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +7 -3
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +11 -7
- package/esm/typings/src/remote-server/ui/ServerApp.d.ts +5 -1
- package/esm/typings/src/types/typeAliasEmoji.d.ts +2 -2
- package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +4 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +25 -17
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -24,7 +24,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-13';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-14';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1577,8 +1577,8 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
  */
 function removeDiacritics(input) {
     /*eslint no-control-regex: "off"*/
-    return input.replace(/[^\u0000-\u007E]/g, (
-        return DIACRITIC_VARIANTS_LETTERS[
+    return input.replace(/[^\u0000-\u007E]/g, (character) => {
+        return DIACRITIC_VARIANTS_LETTERS[character] || character;
     });
 }
 /**
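
The change in this hunk only names the callback parameter, but the surrounding lines show the whole technique: one regex pass over every non-ASCII character, each one looked up in a map of diacritic variants. A minimal, self-contained sketch of that pattern, where the map is a tiny illustrative subset rather than the package's full `DIACRITIC_VARIANTS_LETTERS` table:

```ts
// Minimal sketch of the diacritics-removal pattern shown in the hunk above.
// DIACRITIC_VARIANTS is a tiny illustrative subset, not the package's full table.
const DIACRITIC_VARIANTS: Record<string, string> = {
    'á': 'a', 'é': 'e', 'í': 'i', 'ó': 'o', 'ú': 'u', 'č': 'c', 'ř': 'r', 'š': 's', 'ž': 'z',
};

function removeDiacriticsSketch(input: string): string {
    // Replace every character outside the basic ASCII range with its mapped base
    // letter, keeping the original character when no mapping exists.
    return input.replace(/[^\u0000-\u007E]/g, (character) => {
        return DIACRITIC_VARIANTS[character] || character;
    });
}

console.log(removeDiacriticsSketch('Příliš')); // -> 'Prilis'
```
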
@@ -3767,7 +3767,7 @@ async function forEachAsync(array, options, callbackfunction) {
     tasks.push(task);
     runningTasks.push(task);
     /* not await */ Promise.resolve(task).then(() => {
-        runningTasks = runningTasks.filter((
+        runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
     });
     if (maxParallelCount < runningTasks.length) {
         await Promise.race(runningTasks);
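
For context, the `forEachAsync` lines around this change implement a bounded-concurrency loop: every started task is tracked in `runningTasks`, removed from the list when it resolves, and `Promise.race` is awaited whenever the limit is exceeded. A rough standalone sketch of that pattern follows; the name and signature are illustrative, not the package's actual `forEachAsync` API, and error handling is omitted:

```ts
// Rough sketch of the bounded-concurrency pattern visible in the hunk above.
// Illustrative signature; error handling is intentionally left out.
async function forEachWithLimit<TItem>(
    items: ReadonlyArray<TItem>,
    maxParallelCount: number,
    callbackfunction: (item: TItem) => Promise<void>,
): Promise<void> {
    const tasks: Array<Promise<void>> = [];
    let runningTasks: Array<Promise<void>> = [];

    for (const item of items) {
        const task = callbackfunction(item);
        tasks.push(task);
        runningTasks.push(task);

        // Remove the task from the running list once it resolves (not awaited here).
        void task.then(() => {
            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
        });

        // When too many tasks are in flight, wait until at least one of them finishes.
        if (maxParallelCount < runningTasks.length) {
            await Promise.race(runningTasks);
        }
    }

    // Wait for everything that was started.
    await Promise.all(tasks);
}
```

`Promise.race` only signals that some task finished; the pruning of `runningTasks` happens in the `.then` callback registered when the task was started, which is why the filter and the race work together.
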
@@ -3824,10 +3824,14 @@ function addUsage(...usageItems) {
 }
 
 /**
- * Intercepts LLM tools and counts total usage of the tools
+ * Intercepts LLM tools and counts total usage of the tools.
  *
- *
- *
+ * This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
+ * usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
+ * in real-time through an observable.
+ *
+ * @param llmTools - The LLM tools to be intercepted and tracked
+ * @returns An augmented version of the tools that includes usage tracking capabilities
  * @public exported from `@promptbook/core`
  */
 function countUsage(llmTools) {
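
The expanded JSDoc describes `countUsage` as a proxy that accumulates usage across all model calls and exposes it for real-time monitoring. A simplified sketch of that wrapping technique, using illustrative stand-in types rather than Promptbook's real `LlmExecutionTools` interface, with a plain listener set standing in for the observable:

```ts
// Simplified sketch of the usage-counting proxy described in the JSDoc above.
// `SimpleLlmTools` and `Usage` are illustrative stand-ins, not Promptbook's real types.
type Usage = { price: number; inputTokens: number; outputTokens: number };

interface SimpleLlmTools {
    callChatModel(prompt: string): Promise<{ content: string; usage: Usage }>;
}

function countUsageSketch(llmTools: SimpleLlmTools) {
    let totalUsage: Usage = { price: 0, inputTokens: 0, outputTokens: 0 };
    const listeners = new Set<(usage: Usage) => void>();

    return {
        // Same call surface as the wrapped tools, with usage accounting on every call.
        async callChatModel(prompt: string) {
            const result = await llmTools.callChatModel(prompt);
            totalUsage = {
                price: totalUsage.price + result.usage.price,
                inputTokens: totalUsage.inputTokens + result.usage.inputTokens,
                outputTokens: totalUsage.outputTokens + result.usage.outputTokens,
            };
            listeners.forEach((listener) => listener(totalUsage));
            return result;
        },
        // Subscribe to running totals; returns an unsubscribe function.
        onUsageChange(listener: (usage: Usage) => void) {
            listeners.add(listener);
            return () => listeners.delete(listener);
        },
        // Read the accumulated usage at any point.
        getTotalUsage: () => totalUsage,
    };
}
```

A caller would wrap its real tools once, pass the wrapped object wherever tools are expected, and subscribe to watch the running total grow.
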
@@ -4092,17 +4096,21 @@ class MultipleLlmExecutionTools {
  */
 
 /**
- * Joins multiple LLM Execution Tools into one
+ * Joins multiple LLM Execution Tools into one.
  *
- *
+ * This function takes a list of `LlmExecutionTools` and returns a single unified
+ * `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
  *
- *
- *
- * 2
- * 3
+ * 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
+ * If the first provider doesn't support the requested model or fails, it tries the next one.
+ * 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
+ * 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
  *
+ * @param title - A descriptive title for this collection of joined tools
+ * @param llmExecutionTools - An array of execution tools to be joined
+ * @returns A single unified execution tool wrapper
  *
- * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
  *
  * @public exported from `@promptbook/core`
  */
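
The new JSDoc spells out the three behaviours of the joined tools: ordered failover, aggregated `listModels`, and a warning when no tools are given. A rough sketch of that logic under simplified, illustrative types; this is not the package's actual `MultipleLlmExecutionTools` implementation:

```ts
// Rough sketch of the failover + aggregation behaviour described in the JSDoc above.
// `SimpleProvider` is an illustrative stand-in, not Promptbook's real LlmExecutionTools interface.
interface SimpleProvider {
    title: string;
    listModels(): Promise<Array<string>>;
    callChatModel(prompt: string): Promise<{ content: string }>;
}

function joinProvidersSketch(title: string, llmExecutionTools: Array<SimpleProvider>): SimpleProvider {
    if (llmExecutionTools.length === 0) {
        // Empty case: at least one LLM provider is needed to do anything useful.
        console.warn(`No LLM execution tools were given to "${title}"`);
    }

    return {
        title,
        // Aggregation: combine the model lists of every joined provider.
        async listModels() {
            const modelLists = await Promise.all(llmExecutionTools.map((tools) => tools.listModels()));
            return modelLists.flat();
        },
        // Failover: try providers in the order they were given, moving on when one fails.
        async callChatModel(prompt: string) {
            const errors: Array<unknown> = [];
            for (const tools of llmExecutionTools) {
                try {
                    return await tools.callChatModel(prompt);
                } catch (error) {
                    errors.push(error);
                }
            }
            throw new Error(`All joined providers failed in "${title}" (${errors.length} errors)`);
        },
    };
}
```

As the Tip in the comment notes, callers normally never call this directly; passing an array of LlmExecutionTools is enough and the joining happens internally.
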
@@ -6636,7 +6644,7 @@ async function getKnowledgeForTask(options) {
     const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
     const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
         const { index } = knowledgePiece;
-        const knowledgePieceIndex = index.find((
+        const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
         // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
         if (knowledgePieceIndex === undefined) {
             return {
@@ -7084,7 +7092,7 @@ async function executePipeline(options) {
     resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
 })
     .then(() => {
-        resolving = resolving.filter((
+        resolving = resolving.filter((workItem) => workItem !== work);
     });
 // <- Note: Errors are catched here [3]
 // TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
@@ -7250,7 +7258,7 @@ function createPipelineExecutor(options) {
     // Calculate and update tldr based on pipeline progress
     const cv = newOngoingResult;
     // Calculate progress based on parameters resolved vs total parameters
-    const totalParameters = pipeline.parameters.filter((
+    const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
     let resolvedParameters = 0;
     let currentTaskTitle = '';
     // Get the resolved parameters from output parameters