@promptbook/markdown-utils 0.88.0 → 0.89.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,6 +23,10 @@
23
23
 
24
24
 
25
25
 
26
+ <blockquote style="color: #ff8811">
27
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please refer to the <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
28
+ </blockquote>
29
+
26
30
  ## 📦 Package `@promptbook/markdown-utils`
27
31
 
28
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -25,7 +25,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.89.0-1';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2284,8 +2284,9 @@ function addUsage(...usageItems) {
2284
2284
  * @returns LLM tools with same functionality with added total cost counting
2285
2285
  * @public exported from `@promptbook/core`
2286
2286
  */
2287
- function countTotalUsage(llmTools) {
2287
+ function countUsage(llmTools) {
2288
2288
  let totalUsage = ZERO_USAGE;
2289
+ const spending = new Subject();
2289
2290
  const proxyTools = {
2290
2291
  get title() {
2291
2292
  // TODO: [🧠] Maybe put here some suffix
@@ -2295,12 +2296,15 @@ function countTotalUsage(llmTools) {
2295
2296
  // TODO: [🧠] Maybe put here some suffix
2296
2297
  return llmTools.description;
2297
2298
  },
2298
- async checkConfiguration() {
2299
+ checkConfiguration() {
2299
2300
  return /* not await */ llmTools.checkConfiguration();
2300
2301
  },
2301
2302
  listModels() {
2302
2303
  return /* not await */ llmTools.listModels();
2303
2304
  },
2305
+ spending() {
2306
+ return spending.asObservable();
2307
+ },
2304
2308
  getTotalUsage() {
2305
2309
  // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
2306
2310
  return totalUsage;
@@ -2311,6 +2315,7 @@ function countTotalUsage(llmTools) {
2311
2315
  // console.info('[🚕] callChatModel through countTotalUsage');
2312
2316
  const promptResult = await llmTools.callChatModel(prompt);
2313
2317
  totalUsage = addUsage(totalUsage, promptResult.usage);
2318
+ spending.next(promptResult.usage);
2314
2319
  return promptResult;
2315
2320
  };
2316
2321
  }
@@ -2319,6 +2324,7 @@ function countTotalUsage(llmTools) {
2319
2324
  // console.info('[🚕] callCompletionModel through countTotalUsage');
2320
2325
  const promptResult = await llmTools.callCompletionModel(prompt);
2321
2326
  totalUsage = addUsage(totalUsage, promptResult.usage);
2327
+ spending.next(promptResult.usage);
2322
2328
  return promptResult;
2323
2329
  };
2324
2330
  }
@@ -2327,6 +2333,7 @@ function countTotalUsage(llmTools) {
2327
2333
  // console.info('[🚕] callEmbeddingModel through countTotalUsage');
2328
2334
  const promptResult = await llmTools.callEmbeddingModel(prompt);
2329
2335
  totalUsage = addUsage(totalUsage, promptResult.usage);
2336
+ spending.next(promptResult.usage);
2330
2337
  return promptResult;
2331
2338
  };
2332
2339
  }
@@ -3609,7 +3616,7 @@ async function preparePipeline(pipeline, tools, options) {
3609
3616
  // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
3610
3617
  const _llms = arrayableToArray(tools.llm);
3611
3618
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3612
- const llmToolsWithUsage = countTotalUsage(llmTools);
3619
+ const llmToolsWithUsage = countUsage(llmTools);
3613
3620
  // <- TODO: [🌯]
3614
3621
  /*
3615
3622
  TODO: [🧠][🪑][🔃] Should this be done or not