@promptbook/wizard 0.100.0-26 → 0.100.0-28
This diff compares publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/esm/index.es.js +10 -68
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/utils.index.d.ts +0 -2
- package/esm/typings/src/config.d.ts +0 -6
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
- package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +10 -68
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.100.0-26';
+const PROMPTBOOK_ENGINE_VERSION = '0.100.0-28';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -183,12 +183,6 @@ const VALUE_STRINGS = {
     unserializable: '(unserializable value)',
     circular: '(circular JSON)',
 };
-/**
- * Default cap for the number of tokens in a single request to the LLM
- *
- * @public exported from `@promptbook/utils`
- */
-const MAX_TOKENS = 1048576;
 /**
  * Small number limit
  *
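Note: this hunk deletes the `MAX_TOKENS` constant (1048576, i.e. 2^20 tokens) that `@promptbook/utils` previously exported, matching the `-2` in `utils.index.d.ts` and the `-6` in `config.d.ts` above. A minimal migration sketch for downstream code that imported it only as a fallback cap (`MY_MAX_TOKENS` and `resolveMaxTokens` are hypothetical names, not promptbook APIs):

    // Before 0.100.0-28 a shared cap could be imported:
    //   import { MAX_TOKENS } from '@promptbook/utils';
    // That export is gone, so define a local cap instead.
    const MY_MAX_TOKENS = 8192; // hypothetical replacement value

    function resolveMaxTokens(modelRequirements) {
        // Fall back to the local cap when the pipeline sets no maxTokens
        return modelRequirements.maxTokens || MY_MAX_TOKENS;
    }

    console.info(resolveMaxTokens({})); // -> 8192
    console.info(resolveMaxTokens({ maxTokens: 1024 })); // -> 1024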
@@ -2429,7 +2423,7 @@ class AnthropicClaudeExecutionTools {
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-            max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+            max_tokens: modelRequirements.maxTokens || 8192,
             temperature: modelRequirements.temperature,
             system: modelRequirements.systemMessage,
             messages: [
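Note: with `MAX_TOKENS` removed, Anthropic chat requests that set no explicit `maxTokens` are now capped at 8192 output tokens instead of 2^20. A sketch of how the default resolves (the model name is a placeholder, not taken from this diff):

    // Illustrative only: no maxTokens supplied by the pipeline.
    const modelRequirements = { modelName: 'claude-placeholder' };
    const rawRequest = {
        model: modelRequirements.modelName,
        // 0.100.0-26 sent 1048576 here; 0.100.0-28 sends 8192
        max_tokens: modelRequirements.maxTokens || 8192,
    };
    console.info(rawRequest.max_tokens); // -> 8192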
@@ -2488,59 +2482,6 @@ class AnthropicClaudeExecutionTools {
             },
         });
     }
-    /**
-     * Calls Anthropic Claude API to use a completion model.
-     */
-    async callCompletionModel(prompt) {
-        if (this.options.isVerbose) {
-            console.info('🖋 Anthropic Claude callCompletionModel call');
-        }
-        const { content, parameters, modelRequirements } = prompt;
-        if (modelRequirements.modelVariant !== 'COMPLETION') {
-            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-        }
-        const client = await this.getClient();
-        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-        const rawRequest = {
-            model: modelName,
-            max_tokens_to_sample: modelRequirements.maxTokens || MAX_TOKENS,
-            temperature: modelRequirements.temperature,
-            prompt: rawPromptContent,
-        };
-        const start = $getCurrentDate();
-        const rawResponse = await this.limiter
-            .schedule(() => client.completions.create(rawRequest))
-            .catch((error) => {
-            if (this.options.isVerbose) {
-                console.info(colors.bgRed('error'), error);
-            }
-            throw error;
-        });
-        if (this.options.isVerbose) {
-            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-        }
-        if (!rawResponse.completion) {
-            throw new PipelineExecutionError('No completion from Anthropic Claude');
-        }
-        const resultContent = rawResponse.completion;
-        const complete = $getCurrentDate();
-        const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: { start, complete },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-            },
-        });
-    }
     // <- Note: [🤖] callXxxModel
     /**
      * Get the model that should be used as default
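Note: this hunk removes `callCompletionModel` entirely (see the `-5` in `AnthropicClaudeExecutionTools.d.ts` above), so Anthropic prompts with `modelVariant: 'COMPLETION'` can no longer be executed by this provider. A defensive sketch for callers, assuming the method is simply absent in 0.100.0-28 (`dispatchPrompt` is a hypothetical helper):

    // Guard before dispatching a COMPLETION prompt to these tools.
    function dispatchPrompt(tools, prompt) {
        const isCompletion = prompt.modelRequirements.modelVariant === 'COMPLETION';
        if (isCompletion && typeof tools.callCompletionModel !== 'function') {
            // As of 0.100.0-28 AnthropicClaudeExecutionTools lacks the method
            throw new Error('COMPLETION variant is not supported by this provider');
        }
        return isCompletion ? tools.callCompletionModel(prompt) : tools.callChatModel(prompt);
    }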
@@ -3245,7 +3186,7 @@ class AzureOpenAiExecutionTools {
         try {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
-                maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
+                maxTokens: modelRequirements.maxTokens,
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3351,7 +3292,7 @@ class AzureOpenAiExecutionTools {
         try {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
-                maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
+                maxTokens: modelRequirements.maxTokens,
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -4439,7 +4380,7 @@ class OpenAiCompatibleExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+            max_tokens: modelRequirements.maxTokens,
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -4535,7 +4476,7 @@ class OpenAiCompatibleExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+            max_tokens: modelRequirements.maxTokens,
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
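Note: in the four hunks above (Azure chat and completion, OpenAI-compatible chat and completion) the former fallback is dropped rather than replaced, so an unset `maxTokens` now reaches the request as `undefined` and the provider's server-side default applies. A sketch of the observable difference (object shapes are illustrative):

    // Illustrative only: no maxTokens supplied by the pipeline.
    const modelRequirements = { temperature: 0.7 };
    const modelSettings = {
        max_tokens: modelRequirements.maxTokens, // undefined in 0.100.0-28
        temperature: modelRequirements.temperature,
    };
    // JSON.stringify omits undefined-valued keys, so the wire request
    // carries no token cap at all and the provider default takes over.
    console.info(JSON.stringify(modelSettings)); // -> {"temperature":0.7}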
@@ -5254,7 +5195,6 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: MAX_TOKENS,
 
             temperature: modelRequirements.temperature,
 
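Note: for the Assistants wrapper the change is a pure deletion: the hardcoded `max_tokens: MAX_TOKENS` entry disappears from `modelSettings`, so assistant runs no longer pin an output cap either. A one-line check of the resulting shape (field values are placeholders):

    const modelSettings = { model: 'placeholder-model', temperature: 0.5 };
    console.info('max_tokens' in modelSettings); // -> false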
@@ -9176,8 +9116,10 @@ async function executeAttempts(options) {
                         result: $ongoingTaskResult.$resultString,
                         error: error,
                     });
-                    //
-                    onProgress({
+                    // Report failed attempt
+                    onProgress({
+                        errors: [error],
+                    });
                 }
                 finally {
                     if (!isJokerAttempt &&
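Note: the final hunk makes failed attempts observable: each caught error is now forwarded through `onProgress` as an `errors` array before the retry logic continues. A sketch of a consumer, assuming `onProgress` is the usual execution progress callback (the handler shape is illustrative):

    // Surface per-attempt failures as they happen.
    const onProgress = (progress) => {
        for (const error of progress.errors || []) {
            // Each failed attempt now reports its error before retrying
            console.warn('Attempt failed:', error.message);
        }
    };

    // Mirrors the shape emitted by executeAttempts in 0.100.0-28:
    onProgress({ errors: [new Error('example failure')] });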