@promptbook/remote-server 0.92.0-33 → 0.92.0-34

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/esm/index.es.js CHANGED
@@ -33,7 +33,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2711,12 +2711,14 @@ function countUsage(llmTools) {
     const spending = new Subject();
     const proxyTools = {
         get title() {
-            // TODO: [🧠] Maybe put here some suffix
-            return llmTools.title;
+            return `${llmTools.title} (+usage)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
         },
         get description() {
-            // TODO: [🧠] Maybe put here some suffix
-            return llmTools.description;
+            return `${llmTools.description} (+usage)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
         },
         checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
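
Note: the change above makes `countUsage` append a "(+usage)" suffix to the wrapped tools' title and description, so wrapped instances are distinguishable from the originals. A minimal sketch of the resulting behavior (the `llmTools` object here is illustrative, not taken from the package):

    const llmTools = { title: 'OpenAI', description: 'Models from OpenAI' };
    const proxyTools = {
        get title() {
            return `${llmTools.title} (+usage)`;
        },
        get description() {
            return `${llmTools.description} (+usage)`;
        },
    };
    console.log(proxyTools.title); // -> 'OpenAI (+usage)'
    console.log(proxyTools.description); // -> 'Models from OpenAI (+usage)'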
@@ -2787,7 +2789,14 @@ class MultipleLlmExecutionTools {
         return 'Multiple LLM Providers';
     }
     get description() {
-        return this.llmExecutionTools.map(({ title }, index) => `${index + 1}) \`${title}\``).join('\n');
+        const innerModelsTitlesAndDescriptions = this.llmExecutionTools
+            .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
+            .join('\n\n');
+        return spaceTrim((block) => `
+            Multiple LLM Providers:
+
+            ${block(innerModelsTitlesAndDescriptions)}
+        `);
     }
     /**
      * Check the configuration of all execution tools
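
Note: the rewritten `description` getter now includes each inner provider's description alongside its numbered title, wrapped in a `spaceTrim` template. A sketch of the string it produces for two hypothetical providers (names illustrative, not from the package):

    const llmExecutionTools = [
        { title: 'OpenAI', description: 'Models from OpenAI' },
        { title: 'Anthropic', description: 'Models from Anthropic' },
    ];
    const innerModelsTitlesAndDescriptions = llmExecutionTools
        .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
        .join('\n\n');
    // After spaceTrim, the getter returns:
    // Multiple LLM Providers:
    //
    // 1) `OpenAI`
    // Models from OpenAI
    //
    // 2) `Anthropic`
    // Models from Anthropic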
@@ -5680,6 +5689,7 @@ function knowledgePiecesToString(knowledgePieces) {
  */
 async function getKnowledgeForTask(options) {
     const { tools, preparedPipeline, task, parameters } = options;
+    console.log('!!! getKnowledgeForTask', options);
     const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
     const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
     // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
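
Note: the added `console.log('!!! getKnowledgeForTask', options)` prints the full `options` object on every call and reads like leftover debug output. If the logging is intentional, gating it behind a flag is a common pattern; this is a sketch only, where `DEBUG_PROMPTBOOK` is a hypothetical environment variable, not something the package defines:

    // Hypothetical guard; `DEBUG_PROMPTBOOK` is illustrative, not part of the package.
    if (process.env.DEBUG_PROMPTBOOK) {
        console.log('getKnowledgeForTask', options);
    }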