@promptbook/wizard 0.100.0-26 → 0.100.0-28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,5 @@
  import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
  import { VALUE_STRINGS } from '../config';
- import { MAX_TOKENS } from '../config';
  import { SMALL_NUMBER } from '../config';
  import { renderPromptbookMermaid } from '../conversion/prettify/renderPipelineMermaidOptions';
  import { deserializeError } from '../errors/utils/deserializeError';
@@ -86,7 +85,6 @@ import { isValidUrl } from '../utils/validators/url/isValidUrl';
  import { isValidUuid } from '../utils/validators/uuid/isValidUuid';
  export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
  export { VALUE_STRINGS };
- export { MAX_TOKENS };
  export { SMALL_NUMBER };
  export { renderPromptbookMermaid };
  export { deserializeError };
@@ -129,12 +129,6 @@ export declare const VALUE_STRINGS: {
      readonly unserializable: "(unserializable value)";
      readonly circular: "(circular JSON)";
  };
- /**
-  * Default cap for the number of tokens in a single request to the LLM
-  *
-  * @public exported from `@promptbook/utils`
-  */
- export declare const MAX_TOKENS = 1048576;
  /**
   * Small number limit
   *
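Since `MAX_TOKENS` disappears from both the re-export list and the typings above, any downstream code that imported it will stop compiling. A hedged illustration (the `@promptbook/utils` entry point comes from the removed `@public` JSDoc tag; the migration shown is an assumption, not prescribed by this diff):

    // Before: per the removed JSDoc, this named export resolved to 1048576.
    import { MAX_TOKENS } from '@promptbook/utils';

    // After: the export is gone, so an explicit per-prompt cap is the likely replacement.
    const modelRequirements = { modelVariant: 'CHAT', maxTokens: 1048576 } as const;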
@@ -2,7 +2,6 @@ import Anthropic from '@anthropic-ai/sdk';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
  import type { Prompt } from '../../types/Prompt';
  import type { string_markdown } from '../../types/typeAliases';
  import type { string_markdown_text } from '../../types/typeAliases';
@@ -42,10 +41,6 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
       * Calls Anthropic Claude API to use a chat model.
       */
      callChatModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ChatPromptResult>;
-     /**
-      * Calls Anthropic Claude API to use a completion model.
-      */
-     callCompletionModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<CompletionPromptResult>;
      /**
       * Get the model that should be used as default
       */
@@ -87,8 +87,6 @@ export type CommonModelRequirements = {
      readonly seed?: number_seed;
      /**
       * Maximum number of tokens that can be generated by the model
-      *
-      * Note: [🌾]
       */
      readonly maxTokens?: number;
  };
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
-  * It follows semantic versioning (e.g., `0.100.0-25`).
+  * It follows semantic versioning (e.g., `0.100.0-27`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/wizard",
-     "version": "0.100.0-26",
+     "version": "0.100.0-28",
      "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
      "private": false,
      "sideEffects": false,
@@ -95,7 +95,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.100.0-26"
+         "@promptbook/core": "0.100.0-28"
      },
      "dependencies": {
          "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-26';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-28';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -194,12 +194,6 @@
      unserializable: '(unserializable value)',
      circular: '(circular JSON)',
  };
- /**
-  * Default cap for the number of tokens in a single request to the LLM
-  *
-  * @public exported from `@promptbook/utils`
-  */
- const MAX_TOKENS = 1048576;
  /**
   * Small number limit
   *
@@ -2440,7 +2434,7 @@
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
      model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-     max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+     max_tokens: modelRequirements.maxTokens || 8192,
      temperature: modelRequirements.temperature,
      system: modelRequirements.systemMessage,
      messages: [
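With the fallback constant gone, this chat call now caps generation at a hard-coded 8192 tokens whenever the caller leaves `maxTokens` unset. A minimal sketch of pinning the cap explicitly from the caller's side (the constructor option shape is an assumption for illustration; only the prompt fields visible in this diff are used):

    // Hedged sketch, assuming the tools accept an options object with an API key.
    const tools = new AnthropicClaudeExecutionTools({ apiKey: process.env.ANTHROPIC_API_KEY });
    const result = await tools.callChatModel({
        content: 'Summarize {topic} in one paragraph.',
        parameters: { topic: 'semantic versioning' },
        modelRequirements: {
            modelVariant: 'CHAT',
            maxTokens: 32000, // explicit cap; otherwise the new 8192 default applies
        },
    });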
@@ -2499,59 +2493,6 @@
      },
  });
  }
- /**
-  * Calls Anthropic Claude API to use a completion model.
-  */
- async callCompletionModel(prompt) {
-     if (this.options.isVerbose) {
-         console.info('🖋 Anthropic Claude callCompletionModel call');
-     }
-     const { content, parameters, modelRequirements } = prompt;
-     if (modelRequirements.modelVariant !== 'COMPLETION') {
-         throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-     }
-     const client = await this.getClient();
-     const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-     const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-     const rawRequest = {
-         model: modelName,
-         max_tokens_to_sample: modelRequirements.maxTokens || MAX_TOKENS,
-         temperature: modelRequirements.temperature,
-         prompt: rawPromptContent,
-     };
-     const start = $getCurrentDate();
-     const rawResponse = await this.limiter
-         .schedule(() => client.completions.create(rawRequest))
-         .catch((error) => {
-         if (this.options.isVerbose) {
-             console.info(colors__default["default"].bgRed('error'), error);
-         }
-         throw error;
-     });
-     if (this.options.isVerbose) {
-         console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-     }
-     if (!rawResponse.completion) {
-         throw new PipelineExecutionError('No completion from Anthropic Claude');
-     }
-     const resultContent = rawResponse.completion;
-     const complete = $getCurrentDate();
-     const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-     return exportJson({
-         name: 'promptResult',
-         message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-         order: [],
-         value: {
-             content: resultContent,
-             modelName: rawResponse.model || modelName,
-             timing: { start, complete },
-             usage,
-             rawPromptContent,
-             rawRequest,
-             rawResponse,
-         },
-     });
- }
  // <- Note: [🤖] callXxxModel
  /**
   * Get the model that should be used as default
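After this removal, `AnthropicClaudeExecutionTools` exposes only `callChatModel`, so a prompt that previously used the COMPLETION variant no longer has an Anthropic handler. A hedged sketch of guarding for that at the call site (the guard is illustrative, not part of the package):

    // Hedged sketch: route everything through the remaining chat entry point.
    if (prompt.modelRequirements.modelVariant === 'COMPLETION') {
        throw new Error('Anthropic Claude tools no longer support the COMPLETION variant; use CHAT instead.');
    }
    const chatResult = await tools.callChatModel(prompt);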
@@ -3256,7 +3197,7 @@
  try {
      const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
      const modelSettings = {
-         maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
+         maxTokens: modelRequirements.maxTokens,
          temperature: modelRequirements.temperature,
          user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3362,7 +3303,7 @@
  try {
      const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
      const modelSettings = {
-         maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
+         maxTokens: modelRequirements.maxTokens,
          temperature: modelRequirements.temperature,
          user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -4450,7 +4391,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
      model: modelName,
-     max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+     max_tokens: modelRequirements.maxTokens,
      temperature: modelRequirements.temperature,
      // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
      // <- Note: [🧆]
@@ -4546,7 +4487,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
      model: modelName,
-     max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
+     max_tokens: modelRequirements.maxTokens,
      temperature: modelRequirements.temperature,
      // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
      // <- Note: [🧆]
@@ -5265,7 +5206,6 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
      model: modelName,
-     max_tokens: MAX_TOKENS

      temperature: modelRequirements.temperature,

@@ -9187,8 +9127,10 @@
      result: $ongoingTaskResult.$resultString,
      error: error,
  });
- // Note: Calling void function to signal progress (mutation of `$ongoingTaskResult`) - TODO: !!!! Is this working
- onProgress({});
+ // Report failed attempt
+ onProgress({
+     errors: [error],
+ });
  }
  finally {
      if (!isJokerAttempt &&
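The progress callback now carries the failed attempt's error in an `errors` array rather than signalling via an empty object. A minimal handler sketch (the update type is narrowed to the one field shown in this diff; the surrounding task API is not shown here):

    // Hedged sketch: log each per-attempt error as it is reported; retries continue in `finally`.
    const onProgress = (update: { errors?: ReadonlyArray<Error> }) => {
        for (const error of update.errors ?? []) {
            console.warn('Attempt failed:', error.message);
        }
    };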