@promptbook/node 0.104.0-12 → 0.104.0-14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +31 -18
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +0 -6
- package/esm/typings/src/book-components/Chat/save/_common/string_chat_format_name.d.ts +1 -1
- package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +4 -1
- package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +5 -1
- package/esm/typings/src/book-components/_common/HamburgerMenu/HamburgerMenu.d.ts +4 -0
- package/esm/typings/src/book-components/icons/AboutIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/AttachmentIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/CameraIcon.d.ts +6 -2
- package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/MenuIcon.d.ts +5 -1
- package/esm/typings/src/book-components/icons/SaveIcon.d.ts +6 -2
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +7 -5
- package/esm/typings/src/commands/_common/types/Command.d.ts +1 -1
- package/esm/typings/src/commitments/_base/BookCommitment.d.ts +1 -1
- package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +7 -3
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +11 -7
- package/esm/typings/src/remote-server/ui/ServerApp.d.ts +5 -1
- package/esm/typings/src/types/typeAliasEmoji.d.ts +2 -2
- package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +4 -0
- package/esm/typings/src/utils/random/$randomItem.d.ts +1 -1
- package/esm/typings/src/utils/random/$randomSeed.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +31 -18
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-12';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-14';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4128,17 +4128,21 @@ class MultipleLlmExecutionTools {
  */
 
 /**
- * Joins multiple LLM Execution Tools into one
+ * Joins multiple LLM Execution Tools into one.
  *
- *
+ * This function takes a list of `LlmExecutionTools` and returns a single unified
+ * `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
  *
- *
- *
- * 2
- * 3
+ * 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
+ *    If the first provider doesn't support the requested model or fails, it tries the next one.
+ * 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
+ * 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
  *
+ * @param title - A descriptive title for this collection of joined tools
+ * @param llmExecutionTools - An array of execution tools to be joined
+ * @returns A single unified execution tool wrapper
  *
- * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
  *
  * @public exported from `@promptbook/core`
  */
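To make the new failover/aggregation description concrete, here is a minimal usage sketch in TypeScript. It assumes the `joinLlmExecutionTools(title, ...llmExecutionTools)` call shape implied by the new JSDoc and that the `LlmExecutionTools` type is importable from `@promptbook/types`; the provider variables are placeholders, not part of this diff.

```ts
import { joinLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

// Placeholder providers - any objects implementing `LlmExecutionTools` would work here.
declare const openAiTools: LlmExecutionTools;
declare const anthropicTools: LlmExecutionTools;

// Failover follows argument order: `openAiTools` is tried first; `anthropicTools` is used
// only if the first provider fails or does not offer the requested model.
const llmTools = joinLlmExecutionTools('My joined tools', openAiTools, anthropicTools);

// Aggregation: `listModels` combines the models of every joined provider.
const models = await llmTools.listModels();
console.info(models.length, 'models available');
```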
@@ -4749,8 +4753,8 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
  */
 function removeDiacritics(input) {
     /*eslint no-control-regex: "off"*/
-    return input.replace(/[^\u0000-\u007E]/g, (
-        return DIACRITIC_VARIANTS_LETTERS[
+    return input.replace(/[^\u0000-\u007E]/g, (character) => {
+        return DIACRITIC_VARIANTS_LETTERS[character] || character;
     });
 }
 /**
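For reference, the helper above maps each non-ASCII character through the `DIACRITIC_VARIANTS_LETTERS` lookup table and keeps the character unchanged when no mapping exists. A small usage sketch, assuming the function is re-exported from `@promptbook/utils` (the package's usual home for string utilities):

```ts
import { removeDiacritics } from '@promptbook/utils';

// Accented letters are replaced by their ASCII variants; unmapped characters pass through.
console.info(removeDiacritics('Žluťoučký kůň')); // -> 'Zlutoucky kun'
console.info(removeDiacritics('naïve café')); // -> 'naive cafe'
```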
@@ -5441,7 +5445,7 @@ async function getKnowledgeForTask(options) {
     const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
     const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
         const { index } = knowledgePiece;
-        const knowledgePieceIndex = index.find((
+        const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
         // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
         if (knowledgePieceIndex === undefined) {
             return {
@@ -5889,7 +5893,7 @@ async function executePipeline(options) {
         resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
     })
         .then(() => {
-        resolving = resolving.filter((
+        resolving = resolving.filter((workItem) => workItem !== work);
     });
     // <- Note: Errors are catched here [3]
     // TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
@@ -6055,7 +6059,7 @@ function createPipelineExecutor(options) {
     // Calculate and update tldr based on pipeline progress
     const cv = newOngoingResult;
     // Calculate progress based on parameters resolved vs total parameters
-    const totalParameters = pipeline.parameters.filter((
+    const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
     let resolvedParameters = 0;
     let currentTaskTitle = '';
     // Get the resolved parameters from output parameters
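The progress bookkeeping touched in this hunk amounts to: treat every non-input pipeline parameter as one unit of work and report the resolved fraction. A self-contained sketch of that calculation, with the parameter shape assumed from the surrounding code rather than taken from the package's typings:

```ts
type PipelineParameter = { name: string; isInput: boolean };

// Fraction of non-input parameters that already have a resolved value.
function calculateProgress(
    parameters: ReadonlyArray<PipelineParameter>,
    resolvedParameterNames: ReadonlySet<string>,
): number {
    const totalParameters = parameters.filter((parameter) => !parameter.isInput).length;
    if (totalParameters === 0) {
        return 1;
    }
    const resolvedParameters = parameters.filter(
        (parameter) => !parameter.isInput && resolvedParameterNames.has(parameter.name),
    ).length;
    return resolvedParameters / totalParameters;
}
```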
@@ -6135,7 +6139,7 @@ async function forEachAsync(array, options, callbackfunction) {
         tasks.push(task);
         runningTasks.push(task);
         /* not await */ Promise.resolve(task).then(() => {
-            runningTasks = runningTasks.filter((
+            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
         });
         if (maxParallelCount < runningTasks.length) {
             await Promise.race(runningTasks);
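This hunk sits inside a bounded-parallelism loop: each task removes itself from `runningTasks` when it settles, and `Promise.race` pauses the loop once more than `maxParallelCount` tasks are in flight. A self-contained sketch of the same pattern (names and signature are illustrative, not the library's API):

```ts
// Run `callback` over `items` with at most `maxParallelCount` tasks in flight.
async function forEachWithLimit<TItem>(
    items: ReadonlyArray<TItem>,
    maxParallelCount: number,
    callback: (item: TItem) => Promise<void>,
): Promise<void> {
    const tasks: Array<Promise<void>> = [];
    let runningTasks: Array<Promise<void>> = [];

    for (const item of items) {
        const task = callback(item);
        tasks.push(task);
        runningTasks.push(task);

        // Drop the task from the running set when it settles (same filter as in the diff above).
        const removeFromRunning = () => {
            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
        };
        void task.then(removeFromRunning, removeFromRunning);

        // Too many tasks in flight: wait until at least one settles before scheduling more.
        if (maxParallelCount < runningTasks.length) {
            await Promise.race(runningTasks);
        }
    }

    await Promise.all(tasks);
}

// Example: await forEachWithLimit(urls, 3, async (url) => { await fetch(url); });
```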
@@ -6145,10 +6149,14 @@ async function forEachAsync(array, options, callbackfunction) {
 }
 
 /**
- * Intercepts LLM tools and counts total usage of the tools
+ * Intercepts LLM tools and counts total usage of the tools.
  *
- *
- *
+ * This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
+ * usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
+ * in real-time through an observable.
+ *
+ * @param llmTools - The LLM tools to be intercepted and tracked
+ * @returns An augmented version of the tools that includes usage tracking capabilities
  * @public exported from `@promptbook/core`
  */
 function countUsage(llmTools) {
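A short, hedged sketch of how the wrapper documented above is typically used. The call itself (`countUsage(llmTools)`) comes from the diff; the `spending()` observable and `getTotalUsage()` accessor mentioned in the comments are assumptions about the returned object's members and may differ between Promptbook versions.

```ts
import { countUsage } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

// Any provider (or a joined set of providers) can be wrapped.
declare const baseTools: LlmExecutionTools;

// Every model call made through `trackedTools` adds to a running usage total.
const trackedTools = countUsage(baseTools);

// The augmented object exposes the cumulative usage, e.g. (member names assumed):
//   trackedTools.spending().subscribe((usage) => console.info('usage so far', usage));
//   const total = await trackedTools.getTotalUsage();
```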
@@ -8815,7 +8823,12 @@ const jokerCommandParser = {
  * @see {@link ModelVariant}
  * @public exported from `@promptbook/core`
  */
-const MODEL_VARIANTS = [
+const MODEL_VARIANTS = [
+    'COMPLETION',
+    'CHAT',
+    'IMAGE_GENERATION',
+    'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */,
+];
 
 /**
  * Parses the model command