@promptbook/markitdown 0.92.0-33 → 0.92.0-34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
26
26
  * @generated
27
27
  * @see https://github.com/webgptorg/promptbook
28
28
  */
29
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
29
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
30
30
  /**
31
31
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
32
32
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2760,12 +2760,14 @@ function countUsage(llmTools) {
2760
2760
  const spending = new Subject();
2761
2761
  const proxyTools = {
2762
2762
  get title() {
2763
- // TODO: [🧠] Maybe put here some suffix
2764
- return llmTools.title;
2763
+ return `${llmTools.title} (+usage)`;
2764
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
2765
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2765
2766
  },
2766
2767
  get description() {
2767
- // TODO: [🧠] Maybe put here some suffix
2768
- return llmTools.description;
2768
+ return `${llmTools.description} (+usage)`;
2769
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
2770
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
2769
2771
  },
2770
2772
  checkConfiguration() {
2771
2773
  return /* not await */ llmTools.checkConfiguration();
@@ -2836,7 +2838,14 @@ class MultipleLlmExecutionTools {
2836
2838
  return 'Multiple LLM Providers';
2837
2839
  }
2838
2840
  get description() {
2839
- return this.llmExecutionTools.map(({ title }, index) => `${index + 1}) \`${title}\``).join('\n');
2841
+ const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2842
+ .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
2843
+ .join('\n\n');
2844
+ return spaceTrim((block) => `
2845
+ Multiple LLM Providers:
2846
+
2847
+ ${block(innerModelsTitlesAndDescriptions)}
2848
+ `);
2840
2849
  }
2841
2850
  /**
2842
2851
  * Check the configuration of all execution tools
@@ -5325,6 +5334,7 @@ function knowledgePiecesToString(knowledgePieces) {
5325
5334
  */
5326
5335
  async function getKnowledgeForTask(options) {
5327
5336
  const { tools, preparedPipeline, task, parameters } = options;
5337
+ console.log('!!! getKnowledgeForTask', options);
5328
5338
  const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
5329
5339
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
5330
5340
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
@@ -6046,6 +6056,12 @@ class MarkdownScraper {
6046
6056
  }
6047
6057
  // ---
6048
6058
  if (!llmTools.callEmbeddingModel) {
6059
+ console.log('!!! No callEmbeddingModel function provided', {
6060
+ 'llmTools.title': llmTools.title,
6061
+ 'llmTools.description': llmTools.description,
6062
+ 'llmTools.callEmbeddingModel': llmTools.callEmbeddingModel,
6063
+ llmTools,
6064
+ });
6049
6065
  // TODO: [🟥] Detect browser / node and make it colorful
6050
6066
  console.error('No callEmbeddingModel function provided');
6051
6067
  }