@promptbook/remote-server 0.104.0-13 → 0.104.0-14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +25 -17
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +0 -6
- package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +4 -1
- package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +5 -1
- package/esm/typings/src/book-components/_common/HamburgerMenu/HamburgerMenu.d.ts +4 -0
- package/esm/typings/src/book-components/icons/AboutIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/AttachmentIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/CameraIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/MenuIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/SaveIcon.d.ts +6 -2
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +7 -5
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +7 -3
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +11 -7
- package/esm/typings/src/remote-server/ui/ServerApp.d.ts +5 -1
- package/esm/typings/src/types/typeAliasEmoji.d.ts +2 -2
- package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +4 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +25 -17
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
|
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
|
|
|
33
33
|
* @generated
|
|
34
34
|
* @see https://github.com/webgptorg/promptbook
|
|
35
35
|
*/
|
|
36
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-13';
|
|
36
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.104.0-14';
|
|
37
37
|
/**
|
|
38
38
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
39
39
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -3712,7 +3712,7 @@ async function forEachAsync(array, options, callbackfunction) {
|
|
|
3712
3712
|
tasks.push(task);
|
|
3713
3713
|
runningTasks.push(task);
|
|
3714
3714
|
/* not await */ Promise.resolve(task).then(() => {
|
|
3715
|
-
runningTasks = runningTasks.filter((
|
|
3715
|
+
runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
|
|
3716
3716
|
});
|
|
3717
3717
|
if (maxParallelCount < runningTasks.length) {
|
|
3718
3718
|
await Promise.race(runningTasks);
|
|
@@ -3769,10 +3769,14 @@ function addUsage(...usageItems) {
|
|
|
3769
3769
|
}
|
|
3770
3770
|
|
|
3771
3771
|
/**
|
|
3772
|
-
* Intercepts LLM tools and counts total usage of the tools
|
|
3772
|
+
* Intercepts LLM tools and counts total usage of the tools.
|
|
3773
3773
|
*
|
|
3774
|
-
*
|
|
3775
|
-
*
|
|
3774
|
+
* This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
|
|
3775
|
+
* usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
|
|
3776
|
+
* in real-time through an observable.
|
|
3777
|
+
*
|
|
3778
|
+
* @param llmTools - The LLM tools to be intercepted and tracked
|
|
3779
|
+
* @returns An augmented version of the tools that includes usage tracking capabilities
|
|
3776
3780
|
* @public exported from `@promptbook/core`
|
|
3777
3781
|
*/
|
|
3778
3782
|
function countUsage(llmTools) {
|
|
@@ -4037,17 +4041,21 @@ class MultipleLlmExecutionTools {
|
|
|
4037
4041
|
*/
|
|
4038
4042
|
|
|
4039
4043
|
/**
|
|
4040
|
-
* Joins multiple LLM Execution Tools into one
|
|
4044
|
+
* Joins multiple LLM Execution Tools into one.
|
|
4041
4045
|
*
|
|
4042
|
-
*
|
|
4046
|
+
* This function takes a list of `LlmExecutionTools` and returns a single unified
|
|
4047
|
+
* `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
|
|
4043
4048
|
*
|
|
4044
|
-
*
|
|
4045
|
-
*
|
|
4046
|
-
* 2
|
|
4047
|
-
* 3
|
|
4049
|
+
* 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
|
|
4050
|
+
* If the first provider doesn't support the requested model or fails, it tries the next one.
|
|
4051
|
+
* 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
|
|
4052
|
+
* 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
|
|
4048
4053
|
*
|
|
4054
|
+
* @param title - A descriptive title for this collection of joined tools
|
|
4055
|
+
* @param llmExecutionTools - An array of execution tools to be joined
|
|
4056
|
+
* @returns A single unified execution tool wrapper
|
|
4049
4057
|
*
|
|
4050
|
-
* Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools
|
|
4058
|
+
* Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
|
|
4051
4059
|
*
|
|
4052
4060
|
* @public exported from `@promptbook/core`
|
|
4053
4061
|
*/
|
|
@@ -4674,8 +4682,8 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
|
|
|
4674
4682
|
*/
|
|
4675
4683
|
function removeDiacritics(input) {
|
|
4676
4684
|
/*eslint no-control-regex: "off"*/
|
|
4677
|
-
return input.replace(/[^\u0000-\u007E]/g, (
|
|
4678
|
-
return DIACRITIC_VARIANTS_LETTERS[
|
|
4685
|
+
return input.replace(/[^\u0000-\u007E]/g, (character) => {
|
|
4686
|
+
return DIACRITIC_VARIANTS_LETTERS[character] || character;
|
|
4679
4687
|
});
|
|
4680
4688
|
}
|
|
4681
4689
|
/**
|
|
@@ -6993,7 +7001,7 @@ async function getKnowledgeForTask(options) {
|
|
|
6993
7001
|
const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
|
|
6994
7002
|
const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
|
|
6995
7003
|
const { index } = knowledgePiece;
|
|
6996
|
-
const knowledgePieceIndex = index.find((
|
|
7004
|
+
const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
|
|
6997
7005
|
// <- TODO: Do not use just first knowledge piece and first index to determine embedding model
|
|
6998
7006
|
if (knowledgePieceIndex === undefined) {
|
|
6999
7007
|
return {
|
|
@@ -7441,7 +7449,7 @@ async function executePipeline(options) {
|
|
|
7441
7449
|
resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
|
|
7442
7450
|
})
|
|
7443
7451
|
.then(() => {
|
|
7444
|
-
resolving = resolving.filter((
|
|
7452
|
+
resolving = resolving.filter((workItem) => workItem !== work);
|
|
7445
7453
|
});
|
|
7446
7454
|
// <- Note: Errors are catched here [3]
|
|
7447
7455
|
// TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
|
|
@@ -7607,7 +7615,7 @@ function createPipelineExecutor(options) {
|
|
|
7607
7615
|
// Calculate and update tldr based on pipeline progress
|
|
7608
7616
|
const cv = newOngoingResult;
|
|
7609
7617
|
// Calculate progress based on parameters resolved vs total parameters
|
|
7610
|
-
const totalParameters = pipeline.parameters.filter((
|
|
7618
|
+
const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
|
|
7611
7619
|
let resolvedParameters = 0;
|
|
7612
7620
|
let currentTaskTitle = '';
|
|
7613
7621
|
// Get the resolved parameters from output parameters
|