@promptbook/cli 0.88.0 → 0.89.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,6 +23,10 @@
 
 
 
+<blockquote style="color: #ff8811">
+<b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+</blockquote>
+
 ## 📦 Package `@promptbook/cli`
 
 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -7,12 +7,12 @@ import { stat, access, constants, readFile, writeFile, readdir, mkdir, unlink, r
 import hexEncoder from 'crypto-js/enc-hex';
 import sha256 from 'crypto-js/sha256';
 import { randomBytes } from 'crypto';
+import { Subject } from 'rxjs';
 import * as dotenv from 'dotenv';
 import { spawn } from 'child_process';
 import JSZip from 'jszip';
 import { format } from 'prettier';
 import parserHtml from 'prettier/parser-html';
-import { Subject } from 'rxjs';
 import { parse, unparse } from 'papaparse';
 import { SHA256 } from 'crypto-js';
 import { lookup, extension } from 'mime-types';
@@ -44,7 +44,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.88.0';
+const PROMPTBOOK_ENGINE_VERSION = '0.89.0-1';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1789,8 +1789,9 @@ function addUsage(...usageItems) {
  * @returns LLM tools with same functionality with added total cost counting
  * @public exported from `@promptbook/core`
  */
-function countTotalUsage(llmTools) {
+function countUsage(llmTools) {
     let totalUsage = ZERO_USAGE;
+    const spending = new Subject();
     const proxyTools = {
         get title() {
             // TODO: [🧠] Maybe put here some suffix
@@ -1800,12 +1801,15 @@ function countTotalUsage(llmTools) {
             // TODO: [🧠] Maybe put here some suffix
             return llmTools.description;
         },
-        async checkConfiguration() {
+        checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
         },
         listModels() {
             return /* not await */ llmTools.listModels();
         },
+        spending() {
+            return spending.asObservable();
+        },
         getTotalUsage() {
             // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
             return totalUsage;
@@ -1816,6 +1820,7 @@ function countTotalUsage(llmTools) {
             // console.info('[🚕] callChatModel through countTotalUsage');
             const promptResult = await llmTools.callChatModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
@@ -1824,6 +1829,7 @@ function countTotalUsage(llmTools) {
             // console.info('[🚕] callCompletionModel through countTotalUsage');
             const promptResult = await llmTools.callCompletionModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
@@ -1832,6 +1838,7 @@ function countTotalUsage(llmTools) {
             // console.info('[🚕] callEmbeddingModel through countTotalUsage');
             const promptResult = await llmTools.callEmbeddingModel(prompt);
             totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
             return promptResult;
         };
     }
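
The hunks above rename `countTotalUsage` to `countUsage` and add a `spending()` method backed by an RxJS `Subject`: every chat, completion, and embedding call pushes its usage record to subscribers in addition to being added into the running total. A minimal sketch of how this could be consumed; `countUsage` is taken from `@promptbook/core` per the JSDoc above, while the helper name `watchSpending` and the way `llmTools` is obtained are illustrative assumptions, not part of the published package:

```js
import { countUsage } from '@promptbook/core';

/**
 * Wraps the given LLM tools and logs the usage of every model call.
 * `llmTools` stands for any already-constructed LLM execution tools instance;
 * how it is created is outside the scope of this sketch.
 */
function watchSpending(llmTools) {
    const llmToolsWithUsage = countUsage(llmTools);

    // Each successful callChatModel / callCompletionModel / callEmbeddingModel call
    // pushes its usage record into the Subject behind `spending()`:
    const subscription = llmToolsWithUsage.spending().subscribe((usage) => {
        console.info('Usage of last call:', usage);
        console.info('Total usage so far:', llmToolsWithUsage.getTotalUsage());
    });

    return { llmToolsWithUsage, subscription };
}
```

Note also that the proxied `checkConfiguration()` is no longer declared `async`, but it still returns the underlying promise unawaited (see the `/* not await */` comment), so existing callers can continue to `await` it.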
@@ -2539,7 +2546,7 @@ async function $provideLlmToolsForWizzardOrCli(options) {
         throw new EnvironmentMismatchError('Function `$provideLlmToolsForWizzardOrCli` works only in Node.js environment');
     }
     const { isCacheReloaded } = options !== null && options !== void 0 ? options : {};
-    return cacheLlmTools(countTotalUsage(
+    return cacheLlmTools(countUsage(
     // <- Note: for example here we don`t want the [🌯]
     await $provideLlmToolsFromEnv()), {
         storage: new FileCacheStorage({ fs: $provideFilesystemForNode() }, {
@@ -6767,7 +6774,7 @@ async function preparePipeline(pipeline, tools, options) {
     // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const llmToolsWithUsage = countTotalUsage(llmTools);
+    const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
     TODO: [🧠][🪑][🔃] Should this be done or not