@promptbook/wizard 0.98.0-5 → 0.98.0-9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +218 -52
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +4 -0
- package/esm/typings/src/_packages/types.index.d.ts +10 -2
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +1 -0
- package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +42 -1
- package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +58 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +218 -52
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.98.0-5';
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-9';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [๐] Ignore a discrepancy between file name and entity name
@@ -232,7 +232,7 @@ const DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [๐คนโโ๏ธ]
  *
  * @public exported from `@promptbook/core`
  */
-const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [๐คนโโ๏ธ]
 // <- TODO: [๐]
 /**
  * Where to store your books
@@ -4345,7 +4345,7 @@ resultContent, rawResponse) {
  */

 /**
- * Execution Tools for calling OpenAI API or other
+ * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
  * @public exported from `@promptbook/openai`
  */
@@ -4915,6 +4915,7 @@ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
             baseURL: DEFAULT_OLLAMA_BASE_URL,
             ...ollamaOptions,
             apiKey: 'ollama',
+            isProxied: false, // <- Note: Ollama is always local
         };
         super(openAiCompatibleOptions);
     }
@@ -5101,7 +5102,7 @@ const _OpenAiCompatibleMetadataRegistration = $llmToolsMetadataRegister.register
     title: 'Open AI Compatible',
     packageName: '@promptbook/openai',
     className: 'OpenAiCompatibleExecutionTools',
-    envVariables: ['OPENAI_API_KEY'],
+    envVariables: ['OPENAI_API_KEY', 'OPENAI_BASE_URL'],
     trustLevel: 'CLOSED',
     order: MODEL_ORDERS.TOP_TIER,
     getBoilerplateConfiguration() {
@@ -5111,6 +5112,9 @@ const _OpenAiCompatibleMetadataRegistration = $llmToolsMetadataRegister.register
             className: 'OpenAiCompatibleExecutionTools',
             options: {
                 apiKey: 'sk-',
+                baseURL: 'https://api.openai.com/v1',
+                isProxied: false,
+                remoteServerUrl: DEFAULT_REMOTE_SERVER_URL,
                 maxRequestsPerMinute: DEFAULT_MAX_REQUESTS_PER_MINUTE,
             },
         };
@@ -5168,7 +5172,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
      * Default model for chat variant.
      */
     getDefaultChatModel() {
-        return this.getDefaultModel('gpt-
+        return this.getDefaultModel('gpt-4-turbo');
     }
     /**
      * Default model for completion variant.
@@ -5198,6 +5202,9 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
      * @param options which are relevant are directly passed to the OpenAI client
      */
     constructor(options) {
+        if (options.isProxied) {
+            throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI assistants`);
+        }
         super(options);
         this.assistantId = options.assistantId;
         // TODO: [๐ฑ] Make limiter same as in `OpenAiExecutionTools`
@@ -5379,14 +5386,97 @@ const createOpenAiAssistantExecutionTools = Object.assign((options) => {
  * @public exported from `@promptbook/openai`
  */
 const createOpenAiCompatibleExecutionTools = Object.assign((options) => {
+    if (options.isProxied) {
+        return new RemoteLlmExecutionTools({
+            ...options,
+            identification: {
+                isAnonymous: true,
+                llmToolsConfiguration: [
+                    {
+                        title: 'OpenAI Compatible (proxied)',
+                        packageName: '@promptbook/openai',
+                        className: 'OpenAiCompatibleExecutionTools',
+                        options: {
+                            ...options,
+                            isProxied: false,
+                        },
+                    },
+                ],
+            },
+        });
+    }
     if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
         options = { ...options, dangerouslyAllowBrowser: true };
     }
-    return new
+    return new HardcodedOpenAiCompatibleExecutionTools(options.defaultModelName, options);
 }, {
     packageName: '@promptbook/openai',
     className: 'OpenAiCompatibleExecutionTools',
 });
+/**
+ * Execution Tools for calling ONE SPECIFIC PRECONFIGURED OpenAI compatible provider
+ *
+ * @private for `createOpenAiCompatibleExecutionTools`
+ */
+class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompatibleExecutionTools {
+    /**
+     * Creates OpenAI compatible Execution Tools.
+     *
+     * @param options which are relevant are directly passed to the OpenAI compatible client
+     */
+    constructor(defaultModelName, options) {
+        super(options);
+        this.defaultModelName = defaultModelName;
+        this.options = options;
+    }
+    get title() {
+        return `${this.defaultModelName} on ${this.options.baseURL}`;
+    }
+    get description() {
+        return `OpenAI compatible connected to "${this.options.baseURL}" model "${this.defaultModelName}"`;
+    }
+    /**
+     * List all available models (non dynamically)
+     *
+     * Note: Purpose of this is to provide more information about models than standard listing from API
+     */
+    get HARDCODED_MODELS() {
+        return [
+            {
+                modelName: this.defaultModelName,
+                modelVariant: 'CHAT',
+                modelDescription: '', // <- TODO: What is the best value here, maybe `this.description`?
+            },
+        ];
+    }
+    /**
+     * Computes the usage
+     */
+    computeUsage(...args) {
+        return {
+            ...computeOpenAiUsage(...args),
+            price: UNCERTAIN_ZERO_VALUE, // <- TODO: Maybe in future pass this counting mechanism, but for now, we dont know
+        };
+    }
+    /**
+     * Default model for chat variant.
+     */
+    getDefaultChatModel() {
+        return this.getDefaultModel(this.defaultModelName);
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultCompletionModel() {
+        throw new PipelineExecutionError(`${this.title} does not support COMPLETION model variant`);
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultEmbeddingModel() {
+        throw new PipelineExecutionError(`${this.title} does not support EMBEDDING model variant`);
+    }
+}
 /**
  * TODO: [๐ฆบ] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
  * TODO: [๐ถ] Naming "constructor" vs "creator" vs "factory"
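For orientation, here is a minimal usage sketch of the new proxied/direct behaviour of `createOpenAiCompatibleExecutionTools`. It is not taken from the package; the option names (`apiKey`, `baseURL`, `defaultModelName`, `isProxied`, `remoteServerUrl`) are read from the diff above, while the endpoint, model name and server URL are made-up placeholders.

import { createOpenAiCompatibleExecutionTools } from '@promptbook/openai';

// Direct mode: `defaultModelName` becomes the single hardcoded CHAT model
// exposed by the new HardcodedOpenAiCompatibleExecutionTools.
const directTools = createOpenAiCompatibleExecutionTools({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: 'https://example-inference.local/v1', // <- hypothetical endpoint
    defaultModelName: 'my-chat-model', // <- hypothetical model name
    isProxied: false,
});

// Proxied mode: the same options are wrapped into RemoteLlmExecutionTools and the
// actual call is made by a remote Promptbook server.
const proxiedTools = createOpenAiCompatibleExecutionTools({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: 'https://example-inference.local/v1',
    defaultModelName: 'my-chat-model',
    isProxied: true,
    remoteServerUrl: 'https://example-promptbook-server.local', // <- hypothetical server URL
});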
@@ -5403,6 +5493,9 @@ const createOpenAiExecutionTools = Object.assign((options) => {
     if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
         options = { ...options, dangerouslyAllowBrowser: true };
     }
+    if (options.isProxied) {
+        throw new NotYetImplementedError(`Proxy mode is not yet implemented in createOpenAiExecutionTools`);
+    }
     return new OpenAiExecutionTools(options);
 }, {
     packageName: '@promptbook/openai',
@@ -6782,7 +6875,7 @@ function jsonParse(value) {
         throw new Error(spaceTrim((block) => `
             ${block(error.message)}

-            The JSON text:
+            The expected JSON text:
             ${block(value)}
         `));
     }
@@ -8691,6 +8784,68 @@ function checkExpectations(expectations, value) {
  * Note: [๐] and [๐ค ] are interconnected together
  */

+/**
+ * Validates a prompt result against expectations and format requirements.
+ * This function provides a common abstraction for result validation that can be used
+ * by both execution logic and caching logic to ensure consistency.
+ *
+ * @param options - The validation options including result string, expectations, and format
+ * @returns Validation result with processed string and validity status
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+ */
+function validatePromptResult(options) {
+    const { resultString, expectations, format } = options;
+    let processedResultString = resultString;
+    let validationError;
+    try {
+        // TODO: [๐] Unite object for expecting amount and format
+        if (format) {
+            if (format === 'JSON') {
+                if (!isValidJsonString(processedResultString)) {
+                    // TODO: [๐ข] Do more universally via `FormatParser`
+                    try {
+                        processedResultString = extractJsonBlock(processedResultString);
+                    }
+                    catch (error) {
+                        keepUnused(error);
+                        throw new ExpectError(spaceTrim$1((block) => `
+                            Expected valid JSON string
+
+                            The expected JSON text:
+                            ${block(processedResultString)}
+                        `));
+                    }
+                }
+            }
+            else {
+                throw new UnexpectedError(`Unknown format "${format}"`);
+            }
+        }
+        // TODO: [๐] Unite object for expecting amount and format
+        if (expectations) {
+            checkExpectations(expectations, processedResultString);
+        }
+        return {
+            isValid: true,
+            processedResultString,
+        };
+    }
+    catch (error) {
+        if (error instanceof ExpectError) {
+            validationError = error;
+        }
+        else {
+            // Re-throw non-ExpectError errors (like UnexpectedError)
+            throw error;
+        }
+        return {
+            isValid: false,
+            processedResultString,
+            error: validationError,
+        };
+    }
+}
+
 /**
  * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
  * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
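The new `validatePromptResult` helper reports failures through its return value instead of throwing `ExpectError`, which is what lets `createPipelineExecutor` and `cacheLlmTools` share the same check. A minimal sketch of how a caller consumes it; the variable names and the expectation shape are illustrative assumptions, not copied from the package:

// Hypothetical caller, mirroring how executeAttempts and cacheLlmTools use the helper:
const validationResult = validatePromptResult({
    resultString: rawLlmOutput, // <- assumed variable holding the model output
    expectations: { words: { min: 1, max: 100 } }, // <- assumed expectation shape
    format: 'JSON',
});
if (validationResult.isValid) {
    // Format processing may have modified the string (e.g. JSON extracted from surrounding text)
    console.info(validationResult.processedResultString);
}
else {
    // On failure, `error` carries the ExpectError describing what was violated
    throw validationResult.error;
}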
@@ -8713,13 +8868,13 @@ async function executeAttempts(options) {
     // TODO: [๐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    attempts: for (let
-        const isJokerAttempt =
-        const jokerParameterName = jokerParameterNames[jokerParameterNames.length +
+    attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+        const isJokerAttempt = attemptIndex < 0;
+        const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
         // TODO: [๐ง ][๐ญ] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
         if (isJokerAttempt && !jokerParameterName) {
             throw new UnexpectedError(spaceTrim$1((block) => `
-                Joker not found in attempt ${
+                Joker not found in attempt ${attemptIndex}

                 ${block(pipelineIdentification)}
             `));
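The rewritten loop folds joker attempts and regular retries into a single `attemptIndex`: negative indices consume joker parameters, non-negative indices are real generation attempts. A worked example, assuming two joker parameters and `maxAttempts = 3` (values chosen only for illustration):

// jokerParameterNames = ['jokerA', 'jokerB'], maxAttempts = 3
// attemptIndex runs -2, -1, 0, 1, 2
//   -2 -> isJokerAttempt === true,  jokerParameterName === jokerParameterNames[2 + (-2)] === 'jokerA'
//   -1 -> isJokerAttempt === true,  jokerParameterName === jokerParameterNames[2 + (-1)] === 'jokerB'
//    0, 1, 2 -> isJokerAttempt === false, regular LLM attempts
// The failure summary further below prints `Attempt ${failure.attemptIndex + 1}`,
// so the first regular attempt is reported as "Attempt 1".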
@@ -8917,35 +9072,18 @@ async function executeAttempts(options) {
             }
         }
         // TODO: [๐] Unite object for expecting amount and format
-
-
-
-
-
-
-
-
-
-                        throw new ExpectError(spaceTrim$1((block) => `
-                            Expected valid JSON string
-
-                            ${block(
-                            /*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
-                        `));
-                    }
-                }
-            }
-            else {
-                throw new UnexpectedError(spaceTrim$1((block) => `
-                    Unknown format "${task.format}"
-
-                    ${block(pipelineIdentification)}
-                `));
+        // Use the common validation function for both format and expectations
+        if (task.format || task.expectations) {
+            const validationResult = validatePromptResult({
+                resultString: $ongoingTaskResult.$resultString || '',
+                expectations: task.expectations,
+                format: task.format,
+            });
+            if (!validationResult.isValid) {
+                throw validationResult.error;
             }
-
-
-        if (task.expectations) {
-            checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+            // Update the result string in case format processing modified it (e.g., JSON extraction)
+            $ongoingTaskResult.$resultString = validationResult.processedResultString;
         }
         break attempts;
     }
@@ -8959,6 +9097,7 @@ async function executeAttempts(options) {
                 $ongoingTaskResult.$failedResults = [];
             }
             $ongoingTaskResult.$failedResults.push({
+                attemptIndex,
                 result: $ongoingTaskResult.$resultString,
                 error: error,
             });
@@ -8983,19 +9122,13 @@ async function executeAttempts(options) {
                 });
             }
         }
-        if ($ongoingTaskResult.$expectError !== null &&
-            //
-            $ongoingTaskResult.$failedResults = $ongoingTaskResult.$failedResults || [];
-            $ongoingTaskResult.$failedResults.push({
-                result: $ongoingTaskResult.$resultString,
-                error: $ongoingTaskResult.$expectError,
-            });
-            // Create a summary of all failures
+        if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
+            // Note: Create a summary of all failures
             const failuresSummary = $ongoingTaskResult.$failedResults
-                .map((failure
+                .map((failure) => spaceTrim$1((block) => {
                 var _a, _b;
                 return `
-                    Attempt ${
+                    Attempt ${failure.attemptIndex + 1}:
                     Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
                     ${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}

@@ -11802,6 +11935,7 @@ function cacheLlmTools(llmTools, options = {}) {
         },
     };
     const callCommonModel = async (prompt) => {
+        var _a;
         const { parameters, content, modelRequirements } = prompt;
         // <- Note: These are relevant things from the prompt that the cache key should depend on.
         // TODO: Maybe some standalone function for normalization of content for cache
@@ -11857,11 +11991,42 @@ function cacheLlmTools(llmTools, options = {}) {
         // 1. It has a content property that is null or undefined
         // 2. It has an error property that is truthy
         // 3. It has a success property that is explicitly false
-
+        // 4. It doesn't meet the prompt's expectations or format requirements
+        const isBasicFailedResult = promptResult.content === null ||
             promptResult.content === undefined ||
             promptResult.error ||
             promptResult.success === false;
-
+        let shouldCache = !isBasicFailedResult;
+        // If the basic result is valid, check against expectations and format
+        if (shouldCache && promptResult.content) {
+            try {
+                const validationResult = validatePromptResult({
+                    resultString: promptResult.content,
+                    expectations: prompt.expectations,
+                    format: prompt.format,
+                });
+                shouldCache = validationResult.isValid;
+                if (!shouldCache && isVerbose) {
+                    console.info('Not caching result that fails expectations/format validation for key:', key, {
+                        content: promptResult.content,
+                        expectations: prompt.expectations,
+                        format: prompt.format,
+                        validationError: (_a = validationResult.error) === null || _a === void 0 ? void 0 : _a.message,
+                    });
+                }
+            }
+            catch (error) {
+                // If validation throws an unexpected error, don't cache
+                shouldCache = false;
+                if (isVerbose) {
+                    console.info('Not caching result due to validation error for key:', key, {
+                        content: promptResult.content,
+                        validationError: error instanceof Error ? error.message : String(error),
+                    });
+                }
+            }
+        }
+        if (shouldCache) {
             await storage.setItem(key, {
                 date: $getCurrentDate(),
                 promptbookVersion: PROMPTBOOK_ENGINE_VERSION,
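In effect, the cache now refuses to store a response that would immediately fail the same expectation/format check on replay. A condensed restatement of the rule for readability; `shouldCachePromptResult` is not a function in the package, only an illustration of the logic above:

// Illustrative restatement of the caching decision (assumes validatePromptResult is in scope):
function shouldCachePromptResult(promptResult, prompt) {
    const isBasicFailedResult =
        promptResult.content === null ||
        promptResult.content === undefined ||
        promptResult.error ||
        promptResult.success === false;
    if (isBasicFailedResult) {
        return false;
    }
    try {
        return validatePromptResult({
            resultString: promptResult.content,
            expectations: prompt.expectations,
            format: prompt.format,
        }).isValid;
    }
    catch {
        return false; // <- an unexpected validation error also prevents caching
    }
}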
@@ -11878,7 +12043,7 @@ function cacheLlmTools(llmTools, options = {}) {
                 promptResult,
             });
         }
-        else if (isVerbose) {
+        else if (isVerbose && isBasicFailedResult) {
             console.info('Not caching failed result for key:', key, {
                 content: promptResult.content,
                 error: promptResult.error,
@@ -11965,6 +12130,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
         .list()
         .find(({ packageName, className }) => llmConfiguration.packageName === packageName && llmConfiguration.className === className);
     if (registeredItem === undefined) {
+        console.log('!!! $llmToolsRegister.list()', $llmToolsRegister.list());
         throw new Error(spaceTrim((block) => `
             There is no constructor for LLM provider \`${llmConfiguration.className}\` from \`${llmConfiguration.packageName}\`
