@promptbook/node 0.104.0-12 → 0.104.0-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28):
  1. package/esm/index.es.js +31 -18
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +0 -6
  4. package/esm/typings/src/book-components/Chat/save/_common/string_chat_format_name.d.ts +1 -1
  5. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +4 -1
  6. package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +5 -1
  7. package/esm/typings/src/book-components/_common/HamburgerMenu/HamburgerMenu.d.ts +4 -0
  8. package/esm/typings/src/book-components/icons/AboutIcon.d.ts +5 -1
  9. package/esm/typings/src/book-components/icons/AttachmentIcon.d.ts +6 -2
  10. package/esm/typings/src/book-components/icons/CameraIcon.d.ts +6 -2
  11. package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +5 -1
  12. package/esm/typings/src/book-components/icons/MenuIcon.d.ts +5 -1
  13. package/esm/typings/src/book-components/icons/SaveIcon.d.ts +6 -2
  14. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +7 -5
  15. package/esm/typings/src/commands/_common/types/Command.d.ts +1 -1
  16. package/esm/typings/src/commitments/_base/BookCommitment.d.ts +1 -1
  17. package/esm/typings/src/formfactors/_common/FormfactorDefinition.d.ts +1 -1
  18. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +7 -3
  19. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +11 -7
  20. package/esm/typings/src/remote-server/ui/ServerApp.d.ts +5 -1
  21. package/esm/typings/src/types/typeAliasEmoji.d.ts +2 -2
  22. package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +4 -0
  23. package/esm/typings/src/utils/random/$randomItem.d.ts +1 -1
  24. package/esm/typings/src/utils/random/$randomSeed.d.ts +1 -1
  25. package/esm/typings/src/version.d.ts +1 -1
  26. package/package.json +2 -2
  27. package/umd/index.umd.js +31 -18
  28. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
45
45
  * @generated
46
46
  * @see https://github.com/webgptorg/promptbook
47
47
  */
48
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-12';
48
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-14';
49
49
  /**
50
50
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
51
51
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4145,17 +4145,21 @@
4145
4145
  */
4146
4146
 
4147
4147
  /**
4148
- * Joins multiple LLM Execution Tools into one
4148
+ * Joins multiple LLM Execution Tools into one.
4149
4149
  *
4150
- * @returns {LlmExecutionTools} Single wrapper for multiple LlmExecutionTools
4150
+ * This function takes a list of `LlmExecutionTools` and returns a single unified
4151
+ * `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
4151
4152
  *
4152
- * 0) If there is no LlmExecutionTools, it warns and returns valid but empty LlmExecutionTools
4153
- * 1) If there is only one LlmExecutionTools, it returns it wrapped in a proxy object
4154
- * 2) If there are multiple LlmExecutionTools, first will be used first, second will be used if the first hasn`t defined model variant or fails, etc.
4155
- * 3) When all LlmExecutionTools fail, it throws an error with a list of all errors merged into one
4153
+ * 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
4154
+ * If the first provider doesn't support the requested model or fails, it tries the next one.
4155
+ * 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
4156
+ * 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
4156
4157
  *
4158
+ * @param title - A descriptive title for this collection of joined tools
4159
+ * @param llmExecutionTools - An array of execution tools to be joined
4160
+ * @returns A single unified execution tool wrapper
4157
4161
  *
4158
- * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
4162
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
4159
4163
  *
4160
4164
  * @public exported from `@promptbook/core`
4161
4165
  */
@@ -4766,8 +4770,8 @@
4766
4770
  */
4767
4771
  function removeDiacritics(input) {
4768
4772
  /*eslint no-control-regex: "off"*/
4769
- return input.replace(/[^\u0000-\u007E]/g, (a) => {
4770
- return DIACRITIC_VARIANTS_LETTERS[a] || a;
4773
+ return input.replace(/[^\u0000-\u007E]/g, (character) => {
4774
+ return DIACRITIC_VARIANTS_LETTERS[character] || character;
4771
4775
  });
4772
4776
  }
4773
4777
  /**
@@ -5458,7 +5462,7 @@
5458
5462
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
5459
5463
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
5460
5464
  const { index } = knowledgePiece;
5461
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
5465
+ const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
5462
5466
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
5463
5467
  if (knowledgePieceIndex === undefined) {
5464
5468
  return {
@@ -5906,7 +5910,7 @@
5906
5910
  resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
5907
5911
  })
5908
5912
  .then(() => {
5909
- resolving = resolving.filter((w) => w !== work);
5913
+ resolving = resolving.filter((workItem) => workItem !== work);
5910
5914
  });
5911
5915
  // <- Note: Errors are catched here [3]
5912
5916
  // TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
@@ -6072,7 +6076,7 @@
6072
6076
  // Calculate and update tldr based on pipeline progress
6073
6077
  const cv = newOngoingResult;
6074
6078
  // Calculate progress based on parameters resolved vs total parameters
6075
- const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6079
+ const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
6076
6080
  let resolvedParameters = 0;
6077
6081
  let currentTaskTitle = '';
6078
6082
  // Get the resolved parameters from output parameters
@@ -6152,7 +6156,7 @@
6152
6156
  tasks.push(task);
6153
6157
  runningTasks.push(task);
6154
6158
  /* not await */ Promise.resolve(task).then(() => {
6155
- runningTasks = runningTasks.filter((t) => t !== task);
6159
+ runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
6156
6160
  });
6157
6161
  if (maxParallelCount < runningTasks.length) {
6158
6162
  await Promise.race(runningTasks);
@@ -6162,10 +6166,14 @@
6162
6166
  }
6163
6167
 
6164
6168
  /**
6165
- * Intercepts LLM tools and counts total usage of the tools
6169
+ * Intercepts LLM tools and counts total usage of the tools.
6166
6170
  *
6167
- * @param llmTools LLM tools to be intercepted with usage counting
6168
- * @returns LLM tools with same functionality with added total cost counting
6171
+ * This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
6172
+ * usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
6173
+ * in real-time through an observable.
6174
+ *
6175
+ * @param llmTools - The LLM tools to be intercepted and tracked
6176
+ * @returns An augmented version of the tools that includes usage tracking capabilities
6169
6177
  * @public exported from `@promptbook/core`
6170
6178
  */
6171
6179
  function countUsage(llmTools) {
@@ -8832,7 +8840,12 @@
8832
8840
  * @see {@link ModelVariant}
8833
8841
  * @public exported from `@promptbook/core`
8834
8842
  */
8835
- const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'IMAGE_GENERATION', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
8843
+ const MODEL_VARIANTS = [
8844
+ 'COMPLETION',
8845
+ 'CHAT',
8846
+ 'IMAGE_GENERATION',
8847
+ 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */,
8848
+ ];
8836
8849
 
8837
8850
  /**
8838
8851
  * Parses the model command