@promptbook/wizard 0.100.0-24 → 0.100.0-26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
  import { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION } from '../version';
  import { VALUE_STRINGS } from '../config';
+ import { MAX_TOKENS } from '../config';
  import { SMALL_NUMBER } from '../config';
  import { renderPromptbookMermaid } from '../conversion/prettify/renderPipelineMermaidOptions';
  import { deserializeError } from '../errors/utils/deserializeError';
@@ -85,6 +86,7 @@ import { isValidUrl } from '../utils/validators/url/isValidUrl';
  import { isValidUuid } from '../utils/validators/uuid/isValidUuid';
  export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
  export { VALUE_STRINGS };
+ export { MAX_TOKENS };
  export { SMALL_NUMBER };
  export { renderPromptbookMermaid };
  export { deserializeError };
@@ -6,7 +6,7 @@ export interface BookEditorProps {
  className?: string;
  /**
  * CSS className for a font (e.g. from next/font) to style the editor text.
- * If omitted, defaults to system fonts.
+ * If omitted, defaults to system serif fonts.
  */
  fontClassName?: string;
  /**
@@ -21,6 +21,8 @@ export interface BookEditorProps {
  /**
  * Renders a book editor
  *
+ * 🔥 LIVE COMPONENT TEST: This component is being served live from the playground server!
+ *
  * @public exported from `@promptbook/components`
  */
  export declare function BookEditor(props: BookEditorProps): import("react/jsx-runtime").JSX.Element;
@@ -129,6 +129,12 @@ export declare const VALUE_STRINGS: {
  readonly unserializable: "(unserializable value)";
  readonly circular: "(circular JSON)";
  };
+ /**
+ * Default cap for the number of tokens in a single request to the LLM
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ export declare const MAX_TOKENS = 1048576;
  /**
  * Small number limit
  *
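For context, the new MAX_TOKENS constant (1048576, i.e. 2^20 tokens) acts as a default cap in the provider adapters changed further below: wherever a prompt's model requirements do not specify maxTokens, the request now falls back to MAX_TOKENS instead of the previous hard-coded values (4096, 2000, or none). A minimal sketch of the pattern, assuming only names that appear in this diff:

    import { MAX_TOKENS } from '@promptbook/utils';

    // Fall back to the global cap when the prompt itself does not limit tokens
    const rawRequest = {
        model: modelName,
        max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
        temperature: modelRequirements.temperature,
    };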
@@ -1,10 +1,11 @@
- import type { ReadonlyDeep, WritableDeep } from 'type-fest';
+ import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
  import type { Parameters } from '../../types/typeAliases';
  import type { string_parameter_name } from '../../types/typeAliases';
  import type { TODO_string } from '../../utils/organization/TODO_string';
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
+ import type { PipelineExecutorResult } from '../PipelineExecutorResult';
  import type { CreatePipelineExecutorOptions } from './00-CreatePipelineExecutorOptions';
  /**
  * Options for executing attempts of a pipeline task, including configuration for jokers, priority,
@@ -46,6 +47,10 @@ export type ExecuteAttemptsOptions = Required<Omit<CreatePipelineExecutorOptions
  * The pipeline structure prepared for execution, as a deeply immutable PipelineJson object.
  */
  readonly preparedPipeline: ReadonlyDeep<PipelineJson>;
+ /**
+ * Callback invoked with partial results as the execution progresses.
+ */
+ onProgress(newOngoingResult: PartialDeep<PipelineExecutorResult>): Promisable<void>;
  /**
  * The execution report object, which is updated during execution.
  */
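For illustration, callers that construct ExecuteAttemptsOptions now also pass the onProgress callback alongside the existing fields; it receives a PartialDeep<PipelineExecutorResult> snapshot and may return void or a Promise. A sketch under that assumption (baseOptions stands in for the other required options and is a hypothetical placeholder):

    const result = await executeAttempts({
        ...baseOptions, // preparedPipeline, task, parameters, tools, $executionReport, ...
        onProgress(newOngoingResult) {
            // newOngoingResult is a partial snapshot of the eventual PipelineExecutorResult
            console.info('execution progress', newOngoingResult);
        },
    });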
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.100.0-23`).
+ * It follows semantic versioning (e.g., `0.100.0-25`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/wizard",
- "version": "0.100.0-24",
+ "version": "0.100.0-26",
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.100.0-24"
+ "@promptbook/core": "0.100.0-26"
  },
  "dependencies": {
  "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-24';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-26';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -194,6 +194,12 @@
  unserializable: '(unserializable value)',
  circular: '(circular JSON)',
  };
+ /**
+ * Default cap for the number of tokens in a single request to the LLM
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ const MAX_TOKENS = 1048576;
  /**
  * Small number limit
  *
@@ -2434,8 +2440,7 @@
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  system: modelRequirements.systemMessage,
  messages: [
@@ -2510,7 +2515,7 @@
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelName,
- max_tokens_to_sample: modelRequirements.maxTokens || 2000,
+ max_tokens_to_sample: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  prompt: rawPromptContent,
  };
@@ -3251,8 +3256,7 @@
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3358,8 +3362,7 @@
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -4447,8 +4450,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4544,8 +4546,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || MAX_TOKENS,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -5264,8 +5265,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: MAX_TOKENS
  
  temperature: modelRequirements.temperature,
  
@@ -8943,7 +8943,7 @@
  */
  async function executeAttempts(options) {
  const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
- preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+ preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
  const $ongoingTaskResult = {
  $result: null,
  $resultString: null,
@@ -9187,6 +9187,8 @@
  result: $ongoingTaskResult.$resultString,
  error: error,
  });
+ // Note: Calling void function to signal progress (mutation of `$ongoingTaskResult`) - TODO: !!!! Is this working
+ onProgress({});
  }
  finally {
  if (!isJokerAttempt &&