@promptbook/wizard 0.100.0-25 → 0.100.0-28
This diff shows the changes between publicly released versions of this package as they appear in their public registry; it is provided for informational purposes only.
- package/esm/index.es.js +9 -65
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
- package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +9 -65
- package/umd/index.umd.js.map +1 -1
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts CHANGED
@@ -1,10 +1,11 @@
-import type { ReadonlyDeep, WritableDeep } from 'type-fest';
+import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
 import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
 import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
 import type { Parameters } from '../../types/typeAliases';
 import type { string_parameter_name } from '../../types/typeAliases';
 import type { TODO_string } from '../../utils/organization/TODO_string';
 import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
+import type { PipelineExecutorResult } from '../PipelineExecutorResult';
 import type { CreatePipelineExecutorOptions } from './00-CreatePipelineExecutorOptions';
 /**
  * Options for executing attempts of a pipeline task, including configuration for jokers, priority,
@@ -46,6 +47,10 @@ export type ExecuteAttemptsOptions = Required<Omit<CreatePipelineExecutorOptions
      * The pipeline structure prepared for execution, as a deeply immutable PipelineJson object.
      */
     readonly preparedPipeline: ReadonlyDeep<PipelineJson>;
+    /**
+     * Callback invoked with partial results as the execution progresses.
+     */
+    onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
     /**
      * The execution report object, which is updated during execution.
      */
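The new `onProgress` option lets `executeAttempts` stream partial results to the caller while attempts are still running. Below is a minimal sketch of a conforming handler; the `PipelineExecutorResult` stand-in is abbreviated for illustration (the real type is the one imported from `../PipelineExecutorResult` above and carries more fields):

```ts
import type { PartialDeep, Promisable } from 'type-fest';

// Abbreviated stand-in for illustration only; the real PipelineExecutorResult
// from the Promptbook typings has more fields.
type PipelineExecutorResult = {
    isSuccessful: boolean;
    errors: ReadonlyArray<Error>;
};

// A handler matching the new option's signature:
const onProgress = (newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void> => {
    if (newOngoingResult.errors?.length) {
        console.warn(`Execution reported ${newOngoingResult.errors.length} error(s) so far`);
    }
};
```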
package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts CHANGED
@@ -2,7 +2,6 @@ import Anthropic from '@anthropic-ai/sdk';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
 import type { ChatPromptResult } from '../../execution/PromptResult';
-import type { CompletionPromptResult } from '../../execution/PromptResult';
 import type { Prompt } from '../../types/Prompt';
 import type { string_markdown } from '../../types/typeAliases';
 import type { string_markdown_text } from '../../types/typeAliases';
@@ -42,10 +41,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
      * Calls Anthropic Claude API to use a chat model.
      */
     callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
-    /**
-     * Calls Anthropic Claude API to use a completion model.
-     */
-    callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
     /**
      * Get the model that should be used as default
      */
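`AnthropicClaudeExecutionTools` drops `callCompletionModel` (Anthropic's legacy Text Completions endpoint), leaving only the chat variant. A migration sketch follows, assuming `tools` is an instance obtained from the provider package's public exports; the prompt content and parameter names are made up for the example:

```ts
import type { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Sketch: migrating a completion-style call to the remaining chat variant.
async function summarize(tools: AnthropicClaudeExecutionTools, topic: string): Promise<string> {
    // Before 0.100.0-28 this might have been tools.callCompletionModel(...);
    // now only callChatModel exists on this provider:
    const result = await tools.callChatModel({
        content: 'Write a one-line summary of {topic}',
        parameters: { topic },
        modelRequirements: {
            modelVariant: 'CHAT', // <- 'COMPLETION' is no longer supported here
        },
    });
    return result.content;
}
```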
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.100.0-
+ * It follows semantic versioning (e.g., `0.100.0-27`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/wizard",
-    "version": "0.100.0-25",
+    "version": "0.100.0-28",
     "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
     "private": false,
     "sideEffects": false,
@@ -95,7 +95,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.100.0-25"
+        "@promptbook/core": "0.100.0-28"
     },
     "dependencies": {
         "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.100.0-25';
+const PROMPTBOOK_ENGINE_VERSION = '0.100.0-28';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2434,8 +2434,7 @@
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-            max_tokens: modelRequirements.maxTokens ||
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens || 8192,
             temperature: modelRequirements.temperature,
             system: modelRequirements.systemMessage,
             messages: [
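The chat request now falls back to a fixed 8192-token cap when the pipeline does not specify `maxTokens` (the old fallback value is truncated in this diff view; only the TODO about a global cap survives). Because the fallback uses `||`, an explicit `0` also falls through to the default, as this small self-contained illustration shows:

```ts
// Illustration of the new fallback, not the bundle's code verbatim:
function resolveMaxTokens(maxTokens?: number): number {
    return maxTokens || 8192;
}

console.info(resolveMaxTokens(undefined)); // 8192
console.info(resolveMaxTokens(0)); // 8192 (falsy zero falls through too)
console.info(resolveMaxTokens(4000)); // 4000
```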
@@ -2494,59 +2493,6 @@
         },
     });
 }
-/**
- * Calls Anthropic Claude API to use a completion model.
- */
-async callCompletionModel(prompt) {
-    if (this.options.isVerbose) {
-        console.info('🖋 Anthropic Claude callCompletionModel call');
-    }
-    const { content, parameters, modelRequirements } = prompt;
-    if (modelRequirements.modelVariant !== 'COMPLETION') {
-        throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-    }
-    const client = await this.getClient();
-    const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-    const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-    const rawRequest = {
-        model: modelName,
-        max_tokens_to_sample: modelRequirements.maxTokens || 2000,
-        temperature: modelRequirements.temperature,
-        prompt: rawPromptContent,
-    };
-    const start = $getCurrentDate();
-    const rawResponse = await this.limiter
-        .schedule(() => client.completions.create(rawRequest))
-        .catch((error) => {
-        if (this.options.isVerbose) {
-            console.info(colors__default["default"].bgRed('error'), error);
-        }
-        throw error;
-    });
-    if (this.options.isVerbose) {
-        console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-    }
-    if (!rawResponse.completion) {
-        throw new PipelineExecutionError('No completion from Anthropic Claude');
-    }
-    const resultContent = rawResponse.completion;
-    const complete = $getCurrentDate();
-    const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-    return exportJson({
-        name: 'promptResult',
-        message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-        order: [],
-        value: {
-            content: resultContent,
-            modelName: rawResponse.model || modelName,
-            timing: { start, complete },
-            usage,
-            rawPromptContent,
-            rawRequest,
-            rawResponse,
-        },
-    });
-}
 // <- Note: [🤖] callXxxModel
 /**
  * Get the model that should be used as default
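With the implementation removed from the bundle, the Anthropic tools object simply no longer has a `callCompletionModel` property. Since `LlmExecutionTools` providers implement the `callXxxModel` methods optionally, feature detection is one way for callers to route `COMPLETION` prompts to a provider that still supports them; a sketch with abbreviated stand-in types:

```ts
// Abbreviated stand-ins for illustration; the real interfaces live in the Promptbook typings.
type PromptResult = { content: string };
interface LlmTools {
    callChatModel?(prompt: unknown): Promise<PromptResult>;
    callCompletionModel?(prompt: unknown): Promise<PromptResult>;
}

// Pick the first provider that still implements the completion variant:
function pickCompletionTools(providers: ReadonlyArray<LlmTools>): LlmTools | undefined {
    return providers.find((tools) => typeof tools.callCompletionModel === 'function');
}
```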
@@ -3252,7 +3198,6 @@
         const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
         const modelSettings = {
             maxTokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
             temperature: modelRequirements.temperature,
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3358,8 +3303,7 @@
         try {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
-                maxTokens: modelRequirements.maxTokens
-                // <- TODO: [🌾] Make some global max cap for maxTokens
+                maxTokens: modelRequirements.maxTokens,
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -4448,7 +4392,6 @@
         const modelSettings = {
             model: modelName,
             max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -4544,8 +4487,7 @@
         const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens,
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -5264,8 +5206,6 @@
         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens

             temperature: modelRequirements.temperature,

@@ -8943,7 +8883,7 @@
  */
 async function executeAttempts(options) {
     const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
     const $ongoingTaskResult = {
         $result: null,
         $resultString: null,
@@ -9187,6 +9127,10 @@
                 result: $ongoingTaskResult.$resultString,
                 error: error,
             });
+            // Report failed attempt
+            onProgress({
+                errors: [error],
+            });
         }
         finally {
             if (!isJokerAttempt &&