@promptbook/markdown-utils 0.104.0-13 → 0.104.0-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -23,7 +23,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-13';
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-14';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3427,7 +3427,7 @@ async function forEachAsync(array, options, callbackfunction) {
  tasks.push(task);
  runningTasks.push(task);
  /* not await */ Promise.resolve(task).then(() => {
- runningTasks = runningTasks.filter((t) => t !== task);
+ runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
  });
  if (maxParallelCount < runningTasks.length) {
  await Promise.race(runningTasks);
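
The hunk above only renames a callback parameter, but the surrounding pattern is worth spelling out: `forEachAsync` keeps a list of in-flight promises, removes each one as it settles, and awaits `Promise.race` whenever the list exceeds `maxParallelCount`. A minimal, self-contained sketch of that bounded-parallelism pattern follows. It is an illustration, not the package's exported function; the name `forEachAsyncSketch` and the option shape are chosen here for the example, and error handling is omitted for brevity.

// Sketch of the bounded-parallelism pattern used by forEachAsync.
async function forEachAsyncSketch<TItem>(
    items: ReadonlyArray<TItem>,
    { maxParallelCount }: { maxParallelCount: number },
    callback: (item: TItem) => Promise<void>,
): Promise<void> {
    let runningTasks: Array<Promise<void>> = [];
    const tasks: Array<Promise<void>> = [];

    for (const item of items) {
        const task = callback(item);
        tasks.push(task);
        runningTasks.push(task);

        // Drop the task from the running window as soon as it settles
        void task.then(() => {
            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
        });

        // Throttle: wait for at least one running task before starting more
        if (maxParallelCount < runningTasks.length) {
            await Promise.race(runningTasks);
        }
    }

    await Promise.all(tasks); // Wait for every task, not just the running window
}
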
@@ -3484,10 +3484,14 @@ function addUsage(...usageItems) {
  }

  /**
- * Intercepts LLM tools and counts total usage of the tools
+ * Intercepts LLM tools and counts total usage of the tools.
  *
- * @param llmTools LLM tools to be intercepted with usage counting
- * @returns LLM tools with same functionality with added total cost counting
+ * This function wraps the provided `LlmExecutionTools` with a proxy that tracks the cumulative
+ * usage (tokens, cost, etc.) across all model calls. It provides a way to monitor spending
+ * in real-time through an observable.
+ *
+ * @param llmTools - The LLM tools to be intercepted and tracked
+ * @returns An augmented version of the tools that includes usage tracking capabilities
  * @public exported from `@promptbook/core`
  */
  function countUsage(llmTools) {
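
The rewritten docblock describes `countUsage` as a proxy that accumulates usage across all model calls and exposes it through an observable. A hedged usage sketch follows; the import path is confirmed by the `@public exported from @promptbook/core` tag, but the `spending()` observable name and the shape of the emitted usage object are assumptions for illustration, since the hunk does not show the returned type. `myLlmTools` is a placeholder for whatever LlmExecutionTools provider you already use.

import { countUsage } from '@promptbook/core';

// Wrap an existing provider (placeholder) so every call is counted.
const llmToolsWithUsage = countUsage(myLlmTools);

// Assumed observable API (not shown in this hunk): notified after each model call.
llmToolsWithUsage.spending().subscribe((usage) => {
    console.log('Total usage so far:', usage);
});

// All chat/completion/embedding calls made through `llmToolsWithUsage` are
// forwarded to `myLlmTools`, and their usage is added to the running total.
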
@@ -3752,17 +3756,21 @@ class MultipleLlmExecutionTools {
  */

  /**
- * Joins multiple LLM Execution Tools into one
+ * Joins multiple LLM Execution Tools into one.
  *
- * @returns {LlmExecutionTools} Single wrapper for multiple LlmExecutionTools
+ * This function takes a list of `LlmExecutionTools` and returns a single unified
+ * `MultipleLlmExecutionTools` object. It provides failover and aggregation logic:
  *
- * 0) If there is no LlmExecutionTools, it warns and returns valid but empty LlmExecutionTools
- * 1) If there is only one LlmExecutionTools, it returns it wrapped in a proxy object
- * 2) If there are multiple LlmExecutionTools, first will be used first, second will be used if the first hasn`t defined model variant or fails, etc.
- * 3) When all LlmExecutionTools fail, it throws an error with a list of all errors merged into one
+ * 1. **Failover**: When a model call is made, it tries providers in the order they were provided.
+ * If the first provider doesn't support the requested model or fails, it tries the next one.
+ * 2. **Aggregation**: `listModels` returns a combined list of all models available from all providers.
+ * 3. **Empty case**: If no tools are provided, it logs a warning (as Promptbook requires LLMs to function).
  *
+ * @param title - A descriptive title for this collection of joined tools
+ * @param llmExecutionTools - An array of execution tools to be joined
+ * @returns A single unified execution tool wrapper
  *
- * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`.
  *
  * @public exported from `@promptbook/core`
  */
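
The new docblock names three behaviours: ordered failover between providers, aggregation of `listModels`, and a warning for the empty case. The failover behaviour can be illustrated with a small generic sketch; this is not the package's implementation (that lives in `MultipleLlmExecutionTools`), and the `Provider` type and `tryProvidersInOrder` name are invented here purely for the illustration.

// Generic sketch of ordered failover, as described in the docblock above.
type Provider<TResult> = {
    title: string;
    call: () => Promise<TResult>;
};

async function tryProvidersInOrder<TResult>(providers: ReadonlyArray<Provider<TResult>>): Promise<TResult> {
    const errors: Array<Error> = [];

    for (const provider of providers) {
        try {
            // The first provider that succeeds wins; later ones are never called.
            return await provider.call();
        } catch (error) {
            errors.push(error instanceof Error ? error : new Error(String(error)));
        }
    }

    // When every provider fails, surface all collected errors at once.
    throw new Error(
        `All ${providers.length} providers failed:\n` + errors.map((error) => `- ${error.message}`).join('\n'),
    );
}

As the tip in the docblock notes, you normally never call the join function yourself: passing an array of LlmExecutionTools to `ExecutionTools` is enough to get this behaviour.
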
@@ -4389,8 +4397,8 @@ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
  */
  function removeDiacritics(input) {
  /*eslint no-control-regex: "off"*/
- return input.replace(/[^\u0000-\u007E]/g, (a) => {
- return DIACRITIC_VARIANTS_LETTERS[a] || a;
+ return input.replace(/[^\u0000-\u007E]/g, (character) => {
+ return DIACRITIC_VARIANTS_LETTERS[character] || character;
  });
  }
  /**
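
`removeDiacritics` replaces every character outside the printable-ASCII range with its mapped base letter and leaves unmapped characters untouched. A reduced, self-contained sketch of the same technique follows; the tiny `SAMPLE_VARIANTS` table is illustrative only, whereas the package's `DIACRITIC_VARIANTS_LETTERS` table covers many more letters.

// Reduced sketch of map-based diacritics removal.
const SAMPLE_VARIANTS: Record<string, string> = { á: 'a', č: 'c', é: 'e', í: 'i', ř: 'r', š: 's', ž: 'z' };

function removeDiacriticsSketch(input: string): string {
    // Every character above U+007E is a candidate; unmapped characters pass through unchanged.
    return input.replace(/[^\u0000-\u007E]/g, (character) => SAMPLE_VARIANTS[character] || character);
}

console.log(removeDiacriticsSketch('příliš žluté')); // 'prilis zlute'
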
@@ -6589,7 +6597,7 @@ async function getKnowledgeForTask(options) {
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
  const { index } = knowledgePiece;
- const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
+ const knowledgePieceIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === firstKnowledgeIndex.modelName);
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
  if (knowledgePieceIndex === undefined) {
  return {
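
The renamed `find` callback makes the intent clearer: each knowledge piece carries one index entry per embedding model, and the entry matching the task embedding's model must be selected before relevance can be scored, because vectors from different models are not comparable. The sketch below shows that selection plus a typical cosine-similarity relevance score; the cosine computation and the `position` field are assumptions for illustration, since this hunk does not show how relevance is actually derived.

// Hypothetical shapes for the sketch; the real types live in the Promptbook packages.
type KnowledgeIndexEntry = { modelName: string; position: Array<number> };

function cosineSimilarity(a: ReadonlyArray<number>, b: ReadonlyArray<number>): number {
    const dot = a.reduce((sum, value, i) => sum + value * (b[i] ?? 0), 0);
    const norm = (v: ReadonlyArray<number>) => Math.sqrt(v.reduce((sum, value) => sum + value * value, 0));
    return dot / (norm(a) * norm(b) || 1);
}

function pickRelevance(taskEmbedding: Array<number>, modelName: string, index: Array<KnowledgeIndexEntry>): number {
    // Select the entry produced by the same embedding model as the task embedding.
    const matchingIndex = index.find((knowledgePieceIndex) => knowledgePieceIndex.modelName === modelName);
    return matchingIndex === undefined ? 0 : cosineSimilarity(taskEmbedding, matchingIndex.position);
}
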
@@ -7037,7 +7045,7 @@ async function executePipeline(options) {
  resovedParameterNames = [...resovedParameterNames, currentTask.resultingParameterName];
  })
  .then(() => {
- resolving = resolving.filter((w) => w !== work);
+ resolving = resolving.filter((workItem) => workItem !== work);
  });
  // <- Note: Errors are catched here [3]
  // TODO: BUT if in multiple tasks are errors, only the first one is catched so maybe we should catch errors here and save them to errors array here
@@ -7203,7 +7211,7 @@ function createPipelineExecutor(options) {
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((parameter) => !parameter.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
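
`totalParameters` deliberately excludes input parameters, since inputs are supplied by the caller and never need resolving; progress is then the share of the remaining parameters that have been resolved. A short worked sketch follows; only the `totalParameters` computation appears in this hunk, so the 0-to-1 progress fraction below is an assumption about how the tldr value is derived, and the parameter names are invented for the example.

// Worked example: 5 parameters, 2 of them inputs.
const parameters = [
    { name: 'topic', isInput: true },
    { name: 'audience', isInput: true },
    { name: 'outline', isInput: false },
    { name: 'draft', isInput: false },
    { name: 'summary', isInput: false },
];

const totalParameters = parameters.filter((parameter) => !parameter.isInput).length; // 3
const resolvedParameters = 2; // e.g. 'outline' and 'draft' already computed

const progress = totalParameters === 0 ? 0 : resolvedParameters / totalParameters;
console.log(progress); // 0.666..., two of the three non-input parameters are done
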