@promptbook/legacy-documents 0.92.0-33 → 0.92.0-34

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -102,12 +102,12 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+ import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
  import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
  import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
  import { _DeepseekMetadataRegistration } from '../llm-providers/deepseek/register-configuration';
  import { _GoogleMetadataRegistration } from '../llm-providers/google/register-configuration';
- import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
- import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
  import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
  import { _OpenAiAssistantMetadataRegistration } from '../llm-providers/openai/register-configuration';
  import { migratePipeline } from '../migrations/migratePipeline';
@@ -249,12 +249,12 @@ export { createLlmToolsFromConfiguration };
  export { cacheLlmTools };
  export { countUsage };
  export { limitTotalUsage };
+ export { joinLlmExecutionTools };
+ export { MultipleLlmExecutionTools };
  export { _AnthropicClaudeMetadataRegistration };
  export { _AzureOpenAiMetadataRegistration };
  export { _DeepseekMetadataRegistration };
  export { _GoogleMetadataRegistration };
- export { joinLlmExecutionTools };
- export { MultipleLlmExecutionTools };
  export { _OpenAiMetadataRegistration };
  export { _OpenAiAssistantMetadataRegistration };
  export { migratePipeline };
@@ -1,4 +1,4 @@
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
  /**
   * Automatically configures LLM tools from environment variables in Node.js
@@ -1,5 +1,5 @@
  import type { string_user_id } from '../../../types/typeAliases';
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  /**
   * Options for `$provideLlmToolsFromEnv`
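
Note: the four hunks above are one refactor viewed from different files. The `multiple` provider directory was renamed to `_multiple`, so `joinLlmExecutionTools` and `MultipleLlmExecutionTools` now resolve from '../llm-providers/_multiple/', the package index re-exports them from the new location, and the imports reorder because `_multiple` sorts before the provider directories. A minimal consumer sketch follows; the variadic call shape of `joinLlmExecutionTools` is an assumption, since this diff only shows import paths:

    import { joinLlmExecutionTools } from '@promptbook/legacy-documents';

    // Hypothetical stand-ins for already-constructed LlmExecutionTools instances:
    declare const openAiTools: any;
    declare const anthropicTools: any;

    // Assumption: joinLlmExecutionTools accepts the tools as rest arguments
    // and returns a MultipleLlmExecutionTools wrapper around them.
    const multipleTools = joinLlmExecutionTools(openAiTools, anthropicTools);
    console.info(multipleTools.title); // 'Multiple LLM Providers' (see the umd hunk below)
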
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
-  * It follows semantic versioning (e.g., `0.92.0-32`).
+  * It follows semantic versioning (e.g., `0.92.0-33`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/legacy-documents",
-     "version": "0.92.0-33",
+     "version": "0.92.0-34",
      "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
      "private": false,
      "sideEffects": false,
@@ -51,7 +51,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/legacy-documents.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.92.0-33"
+         "@promptbook/core": "0.92.0-34"
      },
      "dependencies": {
          "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -26,7 +26,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2932,12 +2932,14 @@
      const spending = new rxjs.Subject();
      const proxyTools = {
          get title() {
-             // TODO: [🧠] Maybe put here some suffix
-             return llmTools.title;
+             return `${llmTools.title} (+usage)`;
+             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+             // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
          },
          get description() {
-             // TODO: [🧠] Maybe put here some suffix
-             return llmTools.description;
+             return `${llmTools.description} (+usage)`;
+             // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+             // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
          },
          checkConfiguration() {
              return /* not await */ llmTools.checkConfiguration();
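
The hunk above changes the usage-counting proxy (the wrapper that appears to be built by `countUsage` and `limitTotalUsage`, both imported earlier in this diff) so that `title` and `description` are no longer forwarded verbatim but get an "(+usage)" suffix. A rough before/after sketch with a hypothetical provider titled 'OpenAI':

    const tools = countUsage(openAiTools); // openAiTools is a hypothetical provider
    // up to 0.92.0-33: tools.title === 'OpenAI'
    // from 0.92.0-34:  tools.title === 'OpenAI (+usage)'
    // The same suffix is now applied to tools.description as well.
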
@@ -3008,7 +3010,14 @@
          return 'Multiple LLM Providers';
      }
      get description() {
-         return this.llmExecutionTools.map(({ title }, index) => `${index + 1}) \`${title}\``).join('\n');
+         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
+             .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
+             .join('\n\n');
+         return spaceTrim__default["default"]((block) => `
+             Multiple LLM Providers:
+
+             ${block(innerModelsTitlesAndDescriptions)}
+         `);
      }
      /**
       * Check the configuration of all execution tools
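
With this change, `MultipleLlmExecutionTools.description` no longer returns a bare numbered list of inner titles; it now includes each inner provider's description and wraps the result in a `spaceTrim` block. For two hypothetical inner providers the getter would now produce roughly:

    Multiple LLM Providers:

    1) `OpenAI`
    (hypothetical OpenAI description)

    2) `Anthropic Claude`
    (hypothetical Claude description)
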
@@ -5487,6 +5496,7 @@
   */
  async function getKnowledgeForTask(options) {
      const { tools, preparedPipeline, task, parameters } = options;
+     console.log('!!! getKnowledgeForTask', options);
      const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
      const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
      // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
@@ -6208,6 +6218,12 @@
      }
      // ---
      if (!llmTools.callEmbeddingModel) {
+         console.log('!!! No callEmbeddingModel function provided', {
+             'llmTools.title': llmTools.title,
+             'llmTools.description': llmTools.description,
+             'llmTools.callEmbeddingModel': llmTools.callEmbeddingModel,
+             llmTools,
+         });
          // TODO: [🟥] Detect browser / node and make it colorfull
          console.error('No callEmbeddingModel function provided');
      }
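
The `console.log` calls added in the last two hunks are prefixed with '!!!' and dump whole options and tool objects, which suggests temporary debug output that shipped with this prerelease rather than intentional logging. A consumer that prefers to fail fast when embeddings are unavailable could guard the capability itself; this is a sketch against the shapes visible in this diff, not an API of the package:

    // Hypothetical guard before requesting embeddings:
    if (typeof llmTools.callEmbeddingModel !== 'function') {
        throw new Error(`${llmTools.title} provides no embedding model`);
    }
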