@promptbook/core 0.92.0-33 → 0.92.0-34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +27 -10
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +4 -4
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +27 -10
- package/umd/index.umd.js.map +1 -1
- /package/esm/typings/src/llm-providers/{multiple → _multiple}/MultipleLlmExecutionTools.d.ts +0 -0
- /package/esm/typings/src/llm-providers/{multiple → _multiple}/joinLlmExecutionTools.d.ts +0 -0
- /package/esm/typings/src/llm-providers/{multiple → _multiple}/playground/playground.d.ts +0 -0
package/esm/typings/src/_packages/core.index.d.ts CHANGED
@@ -102,12 +102,12 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
 import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
 import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
 import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
+import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
 import { _AnthropicClaudeMetadataRegistration } from '../llm-providers/anthropic-claude/register-configuration';
 import { _AzureOpenAiMetadataRegistration } from '../llm-providers/azure-openai/register-configuration';
 import { _DeepseekMetadataRegistration } from '../llm-providers/deepseek/register-configuration';
 import { _GoogleMetadataRegistration } from '../llm-providers/google/register-configuration';
-import { joinLlmExecutionTools } from '../llm-providers/multiple/joinLlmExecutionTools';
-import { MultipleLlmExecutionTools } from '../llm-providers/multiple/MultipleLlmExecutionTools';
 import { _OpenAiMetadataRegistration } from '../llm-providers/openai/register-configuration';
 import { _OpenAiAssistantMetadataRegistration } from '../llm-providers/openai/register-configuration';
 import { migratePipeline } from '../migrations/migratePipeline';
@@ -249,12 +249,12 @@ export { createLlmToolsFromConfiguration };
 export { cacheLlmTools };
 export { countUsage };
 export { limitTotalUsage };
+export { joinLlmExecutionTools };
+export { MultipleLlmExecutionTools };
 export { _AnthropicClaudeMetadataRegistration };
 export { _AzureOpenAiMetadataRegistration };
 export { _DeepseekMetadataRegistration };
 export { _GoogleMetadataRegistration };
-export { joinLlmExecutionTools };
-export { MultipleLlmExecutionTools };
 export { _OpenAiMetadataRegistration };
 export { _OpenAiAssistantMetadataRegistration };
 export { migratePipeline };
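For consumers of `@promptbook/core`, this is an internal folder rename: `joinLlmExecutionTools` and `MultipleLlmExecutionTools` stay in the public export list above, only their source moved from `llm-providers/multiple` to `llm-providers/_multiple`. A minimal sketch of the unchanged public usage, assuming `joinLlmExecutionTools` accepts the tools variadically and returns a `MultipleLlmExecutionTools` instance (the variadic signature, the `@promptbook/types` import path and the two provider instances are assumptions, not shown in this diff):

    import { joinLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/types';

    // Joins two already-configured providers behind one facade (both providers are hypothetical placeholders)
    function combineProviders(openAiTools: LlmExecutionTools, claudeTools: LlmExecutionTools) {
        const llmTools = joinLlmExecutionTools(openAiTools, claudeTools);
        console.info(llmTools.title); // -> 'Multiple LLM Providers' (see the UMD hunk further below)
        return llmTools;
    }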
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsFromEnv.d.ts CHANGED
@@ -1,4 +1,4 @@
-import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { CreateLlmToolsFromConfigurationOptions } from './createLlmToolsFromConfiguration';
 /**
  * Automatically configures LLM tools from environment variables in Node.js
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts CHANGED
@@ -1,5 +1,5 @@
 import type { string_user_id } from '../../../types/typeAliases';
-import { MultipleLlmExecutionTools } from '../../multiple/MultipleLlmExecutionTools';
+import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
 import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
 /**
  * Options for `$provideLlmToolsFromEnv`
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.92.0-
+ * It follows semantic versioning (e.g., `0.92.0-33`).
  *
  * @generated
  */
package/package.json CHANGED
package/umd/index.umd.js CHANGED
@@ -27,7 +27,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.92.0-33';
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-34';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3204,7 +3204,14 @@
         return 'Multiple LLM Providers';
     }
     get description() {
-
+        const innerModelsTitlesAndDescriptions = this.llmExecutionTools
+            .map(({ title, description }, index) => `${index + 1}) \`${title}\`\n${description}`)
+            .join('\n\n');
+        return spaceTrim__default["default"]((block) => `
+            Multiple LLM Providers:
+
+            ${block(innerModelsTitlesAndDescriptions)}
+        `);
     }
     /**
      * Check the configuration of all execution tools
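The new `description` getter enumerates the wrapped providers instead of returning a fixed string. Given the added lines above, a `MultipleLlmExecutionTools` wrapping two providers would render roughly the following (the provider titles and descriptions are hypothetical examples):

    Multiple LLM Providers:

    1) `OpenAI`
    <description reported by the OpenAI tools>

    2) `Anthropic Claude`
    <description reported by the Anthropic Claude tools>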
@@ -4558,6 +4565,7 @@
  */
 async function getKnowledgeForTask(options) {
     const { tools, preparedPipeline, task, parameters } = options;
+    console.log('!!! getKnowledgeForTask', options);
     const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
     const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
     // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
@@ -5200,12 +5208,14 @@
     const spending = new rxjs.Subject();
     const proxyTools = {
         get title() {
-
-
+            return `${llmTools.title} (+usage)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
         },
         get description() {
-
-
+            return `${llmTools.description} (+usage)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(+usage)"?
         },
         checkConfiguration() {
             return /* not await */ llmTools.checkConfiguration();
@@ -10350,8 +10360,13 @@
     const filteredTools = {
         // Keep all properties from the original llmTools
         ...llmTools,
+        get title() {
+            return `${llmTools.title} (filtered)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+        },
         get description() {
             return `${llmTools.description} (filtered)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
         },
         // Override listModels to filter the models
         async listModels() {
@@ -10712,12 +10727,14 @@
         ...llmTools,
         // <- Note: [🥫]
         get title() {
-
-
+            return `${llmTools.title} (cached)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(cached)"?
         },
         get description() {
-
-
+            return `${llmTools.description} (cached)`;
+            // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
+            // <- TODO: [🧈][🧠] Does it make sence to suffix "(cached)"?
         },
         listModels() {
             // TODO: [🧠] Should be model listing also cached?
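The three wrapper hunks above (usage counting, model filtering, caching) all follow the same convention: the wrapper forwards the inner tools' `title` and `description` and appends a suffix naming the wrapper, i.e. "(+usage)", "(filtered)" and "(cached)". A minimal stand-alone sketch of that pattern (generic illustration only; `withTitleSuffix` is a hypothetical helper, not part of the package):

    // Mirrors the getter pattern added in this release: keep the wrapped tools,
    // but expose title/description with a suffix identifying the wrapper.
    function withTitleSuffix<TTools extends { title: string; description: string }>(inner: TTools, suffix: string) {
        return {
            ...inner,
            get title() {
                return `${inner.title} ${suffix}`;
            },
            get description() {
                return `${inner.description} ${suffix}`;
            },
        };
    }

    // e.g. withTitleSuffix(innerTools, '(cached)').title === 'OpenAI (cached)' when the inner title is 'OpenAI'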