@promptbook/wizard 0.104.0-2 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2526,6 +2526,7 @@ class RemoteLlmExecutionTools {
     }
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -5828,7 +5829,7 @@ class OpenAiCompatibleExecutionTools {
         let threadMessages = [];
         if ('thread' in prompt && Array.isArray(prompt.thread)) {
             threadMessages = prompt.thread.map((msg) => ({
-                role: msg.role === 'assistant' ? 'assistant' : 'user',
+                role: msg.sender === 'assistant' ? 'assistant' : 'user',
                 content: msg.content,
             }));
         }
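
Note: the hunk above switches the thread mapping from msg.role to msg.sender while still emitting OpenAI-style role fields. A minimal sketch of what the mapping does, assuming a simplified thread shape (only sender and content are confirmed by this diff; everything else is illustrative):

    // Hypothetical prompt carrying a thread; shapes are simplified for illustration.
    const prompt = {
        thread: [
            { sender: 'user', content: 'Summarize the last release.' },
            { sender: 'assistant', content: 'It added image generation support.' },
        ],
    };
    // Same mapping as the changed line: anything not sent by the assistant becomes a user message.
    const threadMessages = prompt.thread.map((msg) => ({
        role: msg.sender === 'assistant' ? 'assistant' : 'user',
        content: msg.content,
    }));
    // threadMessages -> [{ role: 'user', ... }, { role: 'assistant', ... }]
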
@@ -9417,6 +9418,15 @@ function countUsage(llmTools) {
             return promptResult;
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
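
Note: countUsage wraps a set of LLM execution tools in a proxy and, with this change, also intercepts callImageGenerationModel so image generation calls contribute to the running usage total. A minimal re-implementation of just that proxy pattern, as a sketch (the real function also wraps the chat/completion/embedding methods and uses addUsage and a spending observable, which are not reproduced here):

    // Simplified stand-in for the pattern above; not the real promptbook countUsage.
    function countImageGenerationCalls(llmTools) {
        let totalCalls = 0;
        const proxyTools = { ...llmTools };
        if (llmTools.callImageGenerationModel !== undefined) {
            proxyTools.callImageGenerationModel = async (prompt) => {
                const promptResult = await llmTools.callImageGenerationModel(prompt);
                totalCalls += 1; // <- the real code accumulates promptResult.usage instead
                return promptResult;
            };
        }
        return { proxyTools, getTotalCalls: () => totalCalls };
    }
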
@@ -9526,6 +9536,12 @@ class MultipleLlmExecutionTools {
     callEmbeddingModel(prompt) {
         return this.callCommonModel(prompt);
     }
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
     // <- Note: [🤖]
     /**
      * Calls the best available model
@@ -9552,6 +9568,11 @@ class MultipleLlmExecutionTools {
                         continue llm;
                     }
                     return await llmExecutionTools.callEmbeddingModel(prompt);
+                case 'IMAGE_GENERATION':
+                    if (llmExecutionTools.callImageGenerationModel === undefined) {
+                        continue llm;
+                    }
+                    return await llmExecutionTools.callImageGenerationModel(prompt);
                 // <- case [🤖]:
                 default:
                     throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
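
Note: together with the callImageGenerationModel method added above, this case lets MultipleLlmExecutionTools route an 'IMAGE_GENERATION' prompt to the first underlying tools object that implements the method and skip the rest. A sketch of that fallback behaviour with made-up tool objects (the real class iterates its configured llmExecutionTools inside a labeled loop, as the hunk shows):

    // Hypothetical tools list; only the second entry supports image generation.
    const llmExecutionToolsList = [
        { title: 'Text-only provider' },
        {
            title: 'Image-capable provider',
            callImageGenerationModel: async (prompt) => ({ content: 'https://example.com/fake.png' }),
        },
    ];

    async function callFirstImageCapable(prompt) {
        for (const llmExecutionTools of llmExecutionToolsList) {
            if (llmExecutionTools.callImageGenerationModel === undefined) {
                continue; // <- same skip as `continue llm;` in the hunk above
            }
            return await llmExecutionTools.callImageGenerationModel(prompt);
        }
        throw new Error('None of the configured tools support IMAGE_GENERATION');
    }
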
@@ -11255,8 +11276,9 @@ async function executeAttempts(options) {
                     $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
                     break variant;
                 case 'EMBEDDING':
+                case 'IMAGE_GENERATION':
                     throw new PipelineExecutionError(spaceTrim$1((block) => `
-                        Embedding model can not be used in pipeline
+                        ${modelRequirements.modelVariant} model can not be used in pipeline
 
                         This should be catched during parsing
 
@@ -17137,17 +17159,64 @@ function parseAgentSourceWithCommitments(agentSource) {
         };
     }
     const lines = agentSource.split('\n');
-    const agentName = (((_a = lines[0]) === null || _a === void 0 ? void 0 : _a.trim()) || null);
+    let agentName = null;
+    let agentNameLineIndex = -1;
+    // Find the agent name: first non-empty line that is not a commitment and not a horizontal line
+    for (let i = 0; i < lines.length; i++) {
+        const line = lines[i];
+        if (line === undefined) {
+            continue;
+        }
+        const trimmed = line.trim();
+        if (!trimmed) {
+            continue;
+        }
+        const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+        if (isHorizontal) {
+            continue;
+        }
+        let isCommitment = false;
+        for (const definition of COMMITMENT_REGISTRY) {
+            const typeRegex = definition.createTypeRegex();
+            const match = typeRegex.exec(trimmed);
+            if (match && ((_a = match.groups) === null || _a === void 0 ? void 0 : _a.type)) {
+                isCommitment = true;
+                break;
+            }
+        }
+        if (!isCommitment) {
+            agentName = trimmed;
+            agentNameLineIndex = i;
+            break;
+        }
+    }
     const commitments = [];
     const nonCommitmentLines = [];
-    // Always add the first line (agent name) to non-commitment lines
-    if (lines[0] !== undefined) {
-        nonCommitmentLines.push(lines[0]);
+    // Add lines before agentName that are horizontal lines (they are non-commitment)
+    for (let i = 0; i < agentNameLineIndex; i++) {
+        const line = lines[i];
+        if (line === undefined) {
+            continue;
+        }
+        const trimmed = line.trim();
+        if (!trimmed) {
+            continue;
+        }
+        const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+        if (isHorizontal) {
+            nonCommitmentLines.push(line);
+        }
+        // Note: Commitments before agentName are not added to nonCommitmentLines
+    }
+    // Add the agent name line to non-commitment lines
+    if (agentNameLineIndex >= 0) {
+        nonCommitmentLines.push(lines[agentNameLineIndex]);
     }
     // Parse commitments with multiline support
     let currentCommitment = null;
-    // Process lines starting from the second line (skip agent name)
-    for (let i = 1; i < lines.length; i++) {
+    // Process lines starting from after the agent name line
+    const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
+    for (let i = startIndex; i < lines.length; i++) {
         const line = lines[i];
         if (line === undefined) {
            continue;
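
Note: the rewritten block changes how the agent name is found. Rather than always taking the first line of the agent source, the parser now takes the first non-empty line that is neither a horizontal rule (HORIZONTAL_LINE_PATTERN) nor a commitment (COMMITMENT_REGISTRY), and resumes commitment parsing from the line after it. A hedged illustration of the intended effect; the agent source and the PERSONA/KNOWLEDGE commitment keywords below are examples, not taken from this diff:

    // Hypothetical agent source where the name is not on the first line.
    const agentSource = [
        '---',                                  // horizontal rule: skipped when looking for the name
        'PERSONA A friendly cooking assistant', // commitment-shaped line: also skipped
        'Aunt Jane',                            // <- expected to become agentName
        'KNOWLEDGE https://example.com/recipes',
    ].join('\n');

    // Old behaviour: agentName === '---' (whatever the first line was, trimmed).
    // New behaviour: agentName === 'Aunt Jane', the '---' line is still kept among the
    // non-commitment lines, and commitment parsing starts after the 'Aunt Jane' line.
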
@@ -19062,6 +19131,9 @@ function cacheLlmTools(llmTools, options = {}) {
             case 'EMBEDDING':
                 promptResult = await llmTools.callEmbeddingModel(prompt);
                 break variant;
+            case 'IMAGE_GENERATION':
+                promptResult = await llmTools.callImageGenerationModel(prompt);
+                break variant;
             // <- case [🤖]:
             default:
                 throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -19149,6 +19221,11 @@ function cacheLlmTools(llmTools, options = {}) {
             return /* not await */ callCommonModel(prompt);
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            return /* not await */ callCommonModel(prompt);
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
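
Note: as with countUsage, the cache proxy from cacheLlmTools now exposes callImageGenerationModel whenever the wrapped tools do, forwarding it through the shared callCommonModel path, which dispatches on prompt.modelRequirements.modelVariant (see the 'IMAGE_GENERATION' case added earlier in this function). A rough usage sketch; the tool object, prompt, and result shapes here are simplified assumptions and may omit fields the real types require:

    // Hypothetical image-capable tools wrapped in the cache proxy.
    const rawTools = {
        title: 'Raw image tools',
        callImageGenerationModel: async (prompt) => ({ content: 'fresh-image-url', usage: {} }),
    };
    const cachedTools = cacheLlmTools(rawTools);

    // Routed through callCommonModel as 'IMAGE_GENERATION'; a repeated identical prompt
    // is expected to be answered from the cache rather than by calling rawTools again.
    const imageResult = await cachedTools.callImageGenerationModel({
        content: 'A watercolor fox',
        modelRequirements: { modelVariant: 'IMAGE_GENERATION' },
    });
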