@promptbook/core 0.104.0-2 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3744,6 +3744,15 @@ function countUsage(llmTools) {
3744
3744
  return promptResult;
3745
3745
  };
3746
3746
  }
3747
+ if (llmTools.callImageGenerationModel !== undefined) {
3748
+ proxyTools.callImageGenerationModel = async (prompt) => {
3749
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3750
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
3751
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3752
+ spending.next(promptResult.usage);
3753
+ return promptResult;
3754
+ };
3755
+ }
3747
3756
  // <- Note: [🤖]
3748
3757
  return proxyTools;
3749
3758
  }
@@ -3853,6 +3862,12 @@ class MultipleLlmExecutionTools {
3853
3862
  callEmbeddingModel(prompt) {
3854
3863
  return this.callCommonModel(prompt);
3855
3864
  }
3865
+ /**
3866
+ * Calls the best available image generation model
3867
+ */
3868
+ callImageGenerationModel(prompt) {
3869
+ return this.callCommonModel(prompt);
3870
+ }
3856
3871
  // <- Note: [🤖]
3857
3872
  /**
3858
3873
  * Calls the best available model
@@ -3879,6 +3894,11 @@ class MultipleLlmExecutionTools {
3879
3894
  continue llm;
3880
3895
  }
3881
3896
  return await llmExecutionTools.callEmbeddingModel(prompt);
3897
+ case 'IMAGE_GENERATION':
3898
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
3899
+ continue llm;
3900
+ }
3901
+ return await llmExecutionTools.callImageGenerationModel(prompt);
3882
3902
  // <- case [🤖]:
3883
3903
  default:
3884
3904
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6304,8 +6324,9 @@ async function executeAttempts(options) {
6304
6324
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
6305
6325
  break variant;
6306
6326
  case 'EMBEDDING':
6327
+ case 'IMAGE_GENERATION':
6307
6328
  throw new PipelineExecutionError(spaceTrim$1((block) => `
6308
- Embedding model can not be used in pipeline
6329
+ ${modelRequirements.modelVariant} model can not be used in pipeline
6309
6330
 
6310
6331
  This should be catched during parsing
6311
6332
 
@@ -11044,17 +11065,64 @@ function parseAgentSourceWithCommitments(agentSource) {
11044
11065
  };
11045
11066
  }
11046
11067
  const lines = agentSource.split('\n');
11047
- const agentName = (((_a = lines[0]) === null || _a === void 0 ? void 0 : _a.trim()) || null);
11068
+ let agentName = null;
11069
+ let agentNameLineIndex = -1;
11070
+ // Find the agent name: first non-empty line that is not a commitment and not a horizontal line
11071
+ for (let i = 0; i < lines.length; i++) {
11072
+ const line = lines[i];
11073
+ if (line === undefined) {
11074
+ continue;
11075
+ }
11076
+ const trimmed = line.trim();
11077
+ if (!trimmed) {
11078
+ continue;
11079
+ }
11080
+ const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
11081
+ if (isHorizontal) {
11082
+ continue;
11083
+ }
11084
+ let isCommitment = false;
11085
+ for (const definition of COMMITMENT_REGISTRY) {
11086
+ const typeRegex = definition.createTypeRegex();
11087
+ const match = typeRegex.exec(trimmed);
11088
+ if (match && ((_a = match.groups) === null || _a === void 0 ? void 0 : _a.type)) {
11089
+ isCommitment = true;
11090
+ break;
11091
+ }
11092
+ }
11093
+ if (!isCommitment) {
11094
+ agentName = trimmed;
11095
+ agentNameLineIndex = i;
11096
+ break;
11097
+ }
11098
+ }
11048
11099
  const commitments = [];
11049
11100
  const nonCommitmentLines = [];
11050
- // Always add the first line (agent name) to non-commitment lines
11051
- if (lines[0] !== undefined) {
11052
- nonCommitmentLines.push(lines[0]);
11101
+ // Add lines before agentName that are horizontal lines (they are non-commitment)
11102
+ for (let i = 0; i < agentNameLineIndex; i++) {
11103
+ const line = lines[i];
11104
+ if (line === undefined) {
11105
+ continue;
11106
+ }
11107
+ const trimmed = line.trim();
11108
+ if (!trimmed) {
11109
+ continue;
11110
+ }
11111
+ const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
11112
+ if (isHorizontal) {
11113
+ nonCommitmentLines.push(line);
11114
+ }
11115
+ // Note: Commitments before agentName are not added to nonCommitmentLines
11116
+ }
11117
+ // Add the agent name line to non-commitment lines
11118
+ if (agentNameLineIndex >= 0) {
11119
+ nonCommitmentLines.push(lines[agentNameLineIndex]);
11053
11120
  }
11054
11121
  // Parse commitments with multiline support
11055
11122
  let currentCommitment = null;
11056
- // Process lines starting from the second line (skip agent name)
11057
- for (let i = 1; i < lines.length; i++) {
11123
+ // Process lines starting from after the agent name line
11124
+ const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
11125
+ for (let i = startIndex; i < lines.length; i++) {
11058
11126
  const line = lines[i];
11059
11127
  if (line === undefined) {
11060
11128
  continue;
@@ -16871,6 +16939,9 @@ function cacheLlmTools(llmTools, options = {}) {
16871
16939
  case 'EMBEDDING':
16872
16940
  promptResult = await llmTools.callEmbeddingModel(prompt);
16873
16941
  break variant;
16942
+ case 'IMAGE_GENERATION':
16943
+ promptResult = await llmTools.callImageGenerationModel(prompt);
16944
+ break variant;
16874
16945
  // <- case [🤖]:
16875
16946
  default:
16876
16947
  throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -16958,6 +17029,11 @@ function cacheLlmTools(llmTools, options = {}) {
16958
17029
  return /* not await */ callCommonModel(prompt);
16959
17030
  };
16960
17031
  }
17032
+ if (llmTools.callImageGenerationModel !== undefined) {
17033
+ proxyTools.callImageGenerationModel = async (prompt) => {
17034
+ return /* not await */ callCommonModel(prompt);
17035
+ };
17036
+ }
16961
17037
  // <- Note: [🤖]
16962
17038
  return proxyTools;
16963
17039
  }
@@ -16996,6 +17072,11 @@ function limitTotalUsage(llmTools, options = {}) {
16996
17072
  throw new LimitReachedError('Cannot call `callEmbeddingModel` because you are not allowed to spend any cost');
16997
17073
  };
16998
17074
  }
17075
+ if (proxyTools.callImageGenerationModel !== undefined) {
17076
+ proxyTools.callImageGenerationModel = async (prompt) => {
17077
+ throw new LimitReachedError('Cannot call `callImageGenerationModel` because you are not allowed to spend any cost');
17078
+ };
17079
+ }
16999
17080
  // <- Note: [🤖]
17000
17081
  return proxyTools;
17001
17082
  }
@@ -18067,7 +18148,7 @@ class OpenAiCompatibleExecutionTools {
18067
18148
  let threadMessages = [];
18068
18149
  if ('thread' in prompt && Array.isArray(prompt.thread)) {
18069
18150
  threadMessages = prompt.thread.map((msg) => ({
18070
- role: msg.role === 'assistant' ? 'assistant' : 'user',
18151
+ role: msg.sender === 'assistant' ? 'assistant' : 'user',
18071
18152
  content: msg.content,
18072
18153
  }));
18073
18154
  }