@promptbook/node 0.92.0-33 → 0.92.0-34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -102,12 +102,12 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+ import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
  import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
  import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
  import { _DeepseekMetadataRegistration } from '../llm-providers/deepseek/register-configuration';
  import { _GoogleMetadataRegistration } from '../llm-providers/google/register-configuration';
- import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
- import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
  import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
  import { _OpenAiAssistantMetadataRegistration } from '../llm-providers/openai/register-configuration';
  import { migratePipeline } from '../migrations/migratePipeline';
@@ -249,12 +249,12 @@ export { createLlmToolsFromConfiguration };
  export { cacheLlmTools };
  export { countUsage };
  export { limitTotalUsage };
+ export { joinLlmExecutionTools };
+ export { MultipleLlmExecutionTools };
  export { _AnthropicClaudeMetadataRegistration };
  export { _AzureOpenAiMetadataRegistration };
  export { _DeepseekMetadataRegistration };
  export { _GoogleMetadataRegistration };
- export { joinLlmExecutionTools };
- export { MultipleLlmExecutionTools };
  export { _OpenAiMetadataRegistration };
  export { _OpenAiAssistantMetadataRegistration };
  export { migratePipeline };
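
Note for orientation (not part of the published diff): the two hunks above only move joinLlmExecutionTools and MultipleLlmExecutionTools from the internal folder llm-providers/multiple to llm-providers/_multiple and reorder the re-exports; the names exported from the package entry point are unchanged. A minimal consumer-side sketch, assuming (the signature is not shown in this diff) that joinLlmExecutionTools takes several providers and returns a MultipleLlmExecutionTools facade that tries them in order:

// Hypothetical usage; the signature of joinLlmExecutionTools and the
// '@promptbook/types' import are assumptions, not taken from this diff.
import { joinLlmExecutionTools, MultipleLlmExecutionTools } from '@promptbook/node';
import type { LlmExecutionTools } from '@promptbook/types';

declare const openAiTools: LlmExecutionTools;
declare const anthropicTools: LlmExecutionTools;

// Combine both providers behind a single LlmExecutionTools-compatible facade:
const llmTools: MultipleLlmExecutionTools = joinLlmExecutionTools(openAiTools, anthropicTools);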
@@ -1,4 +1,4 @@
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
  import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
  /**
   * Automatically configures LLM tools from environment variables in Node.js
@@ -1,5 +1,5 @@
  import type { string_user_id } from '../../../types/typeAliases';
- import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+ import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  /**
   * Options for `$provideLlmToolsFromEnv`
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.92.0-32`).
+ * It follows semantic versioning (e.g., `0.92.0-33`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/node",
- "version": "0.92.0-33",
+ "version": "0.92.0-34",
  "description": "It's time for a paradigm shift. The future of software in plain English, French or Latin",
  "private": false,
  "sideEffects": false,
@@ -51,7 +51,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.92.0-33"
+ "@promptbook/core": "0.92.0-34"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2938,7 +2938,14 @@
  return 'Multiple LLM Providers';
  }
  get description() {
- return this.llmExecutionTools.map(({ title }, index) => `${index + 1}) \`${title}\``).join('\n');
+ const innerModelsTitlesAndDescriptions = this.llmExecutionTools
+ .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
+ .join('\n\n');
+ return spaceTrim__default["default"]((block) => `
+ Multiple LLM Providers:
+
+ ${block(innerModelsTitlesAndDescriptions)}
+ `);
  }
  /**
   * Check the configuration of all execution tools
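
Note for orientation (not part of the published diff): the hunk above rewrites the description getter of MultipleLlmExecutionTools so it returns a spaceTrim-ed block with each provider's title and description instead of a bare list of titles. A runnable sketch of the new output shape, with made-up provider titles and descriptions:

// Illustrative only; the two providers below are invented for the example.
const llmExecutionTools = [
    { title: 'OpenAI', description: 'Uses OpenAI API to run GPT models' },
    { title: 'Anthropic Claude', description: 'Uses Anthropic API to run Claude models' },
];

// Same shape as the new getter body (spaceTrim is omitted here; it only strips shared indentation):
const innerModelsTitlesAndDescriptions = llmExecutionTools
    .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
    .join('\n\n');

console.log(`Multiple LLM Providers:\n\n${innerModelsTitlesAndDescriptions}`);
// Multiple LLM Providers:
//
// 1) `OpenAI`
// Uses OpenAI API to run GPT models
//
// 2) `Anthropic Claude`
// Uses Anthropic API to run Claude models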
@@ -4270,6 +4277,7 @@
   */
  async function getKnowledgeForTask(options) {
  const { tools, preparedPipeline, task, parameters } = options;
+ console.log('!!! getKnowledgeForTask', options);
  const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
  const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
@@ -4912,12 +4920,14 @@
  const spending = new rxjs.Subject();
  const proxyTools = {
  get title() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.title;
+ return `${llmTools.title} (+usage)`;
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+ // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
  },
  get description() {
- // TODO: [🧠] Maybe put here some suffix
- return llmTools.description;
+ return `${llmTools.description} (+usage)`;
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+ // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
  },
  checkConfiguration() {
  return /* not await */ llmTools.checkConfiguration();
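
Note for orientation (not part of the published diff): the last hunk makes the usage-counting proxy suffix the wrapped provider's title and description with "(+usage)" instead of passing them through unchanged. A minimal sketch of the observable effect, with a made-up provider:

// Illustrative only; `llmTools` stands in for any wrapped provider.
const llmTools = { title: 'OpenAI', description: 'Uses OpenAI API to run GPT models' };

const proxyTools = {
    get title() {
        return `${llmTools.title} (+usage)`; // 0.92.0-33 returned llmTools.title unchanged
    },
    get description() {
        return `${llmTools.description} (+usage)`; // likewise unchanged in 0.92.0-33
    },
};

console.log(proxyTools.title); // "OpenAI (+usage)"
console.log(proxyTools.description); // "Uses OpenAI API to run GPT models (+usage)"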