@promptbook/components 0.104.0-2 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -185,6 +185,7 @@ import type { BookTranspiler } from '../transpilers/_common/BookTranspiler';
185
185
  import type { BookTranspilerOptions } from '../transpilers/_common/BookTranspilerOptions';
186
186
  import type { IntermediateFilesStrategy } from '../types/IntermediateFilesStrategy';
187
187
  import type { LlmCall } from '../types/LlmCall';
188
+ import type { Message } from '../types/Message';
188
189
  import type { ModelRequirements } from '../types/ModelRequirements';
189
190
  import type { CompletionModelRequirements } from '../types/ModelRequirements';
190
191
  import type { ChatModelRequirements } from '../types/ModelRequirements';
@@ -551,6 +552,7 @@ export type { BookTranspiler };
551
552
  export type { BookTranspilerOptions };
552
553
  export type { IntermediateFilesStrategy };
553
554
  export type { LlmCall };
555
+ export type { Message };
554
556
  export type { ModelRequirements };
555
557
  export type { CompletionModelRequirements };
556
558
  export type { ChatModelRequirements };
@@ -1,22 +1,17 @@
1
+ import { Message } from '../../../types/Message';
1
2
  import type { id, string_markdown } from '../../../types/typeAliases';
2
3
  /**
3
4
  * A message in the chat
4
5
  *
5
6
  * @public exported from `@promptbook/components`
6
7
  */
7
- export type ChatMessage = {
8
+ export type ChatMessage = Omit<Message<id>, 'direction' | 'recipients' | 'threadId' | 'metadata'> & {
8
9
  /**
9
- * Unique identifier of the message
10
- */
11
- id?: id;
12
- /**
13
- * Date when the message was created
14
- */
15
- date?: Date;
16
- /**
17
- * The name of the participant who sent the message
10
+ * Force the channel to be 'PROMPTBOOK_CHAT'
11
+ *
12
+ * @default 'PROMPTBOOK_CHAT'
18
13
  */
19
- from: id;
14
+ channel?: 'PROMPTBOOK_CHAT';
20
15
  /**
21
16
  * The content of the message with optional markdown formatting
22
17
  */
@@ -37,6 +32,7 @@ export type ChatMessage = {
37
32
  isVoiceCall?: boolean;
38
33
  };
39
34
  /**
35
+ * TODO: Make all fields readonly
40
36
  * TODO: Delete `expectedAnswer` from ChatMessage
41
37
  * TODO: Rename `date` into `created`+`modified`
42
38
  */
@@ -1,8 +1,8 @@
1
1
  import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
5
- import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
5
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
6
6
  import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
7
7
  /**
8
8
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
43
43
  * Calls the best available embedding model
44
44
  */
45
45
  callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
46
+ /**
47
+ * Calls the best available image generation model
48
+ */
49
+ callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
46
50
  /**
47
51
  * Calls the best available model
48
52
  *
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
46
46
  private callCommonModel;
47
47
  }
48
48
  /**
49
+ * TODO: !!!! Deprecate pipeline server and all of its components
49
50
  * TODO: Maybe use `$exportJson`
50
51
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
51
52
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -0,0 +1,49 @@
1
+ import { Arrayable } from 'type-fest';
2
+ import { really_any } from '../_packages/types.index';
3
+ import { id, string_date_iso8601, string_markdown } from './typeAliases';
4
+ /**
5
+ * A generic message structure for various communication channels
6
+ */
7
+ export type Message<TParticipant> = {
8
+ /**
9
+ * Unique identifier of the message
10
+ */
11
+ readonly id?: id;
12
+ /**
13
+ * Date when the message was created
14
+ */
15
+ readonly createdAt?: Date | string_date_iso8601;
16
+ /**
17
+ * The communication channel of the message
18
+ */
19
+ readonly channel?: 'PROMPTBOOK_CHAT' | 'EMAIL' | 'SMS' | 'WHATSAPP' | 'TELEGRAM' | 'SIGNAL' | string | 'UNKNOWN';
20
+ /**
21
+ * Is the message sent from the Promptbook or to the Promptbook
22
+ */
23
+ readonly direction?: 'INBOUND' | 'OUTBOUND' | 'INTERNAL' | 'INITIAL';
24
+ /**
25
+ * Who sent the message
26
+ */
27
+ readonly sender: TParticipant;
28
+ /**
29
+ * Who are the recipients of the message
30
+ */
31
+ readonly recipients?: Readonly<Arrayable<TParticipant>>;
32
+ /**
33
+ * The content of the message as markdown
34
+ *
35
+ * Note: We are converting all message content to markdown for consistency
36
+ */
37
+ readonly content: string_markdown;
38
+ /**
39
+ * The thread identifier the message belongs to
40
+ *
41
+ * - `null` means the message is not part of any thread
42
+ * - `undefined` means that we don't know if the message is part of a thread or not
43
+ */
44
+ readonly threadId?: id | null;
45
+ /**
46
+ * Arbitrary metadata associated with the message
47
+ */
48
+ readonly metadata?: Readonly<Record<string, really_any>>;
49
+ };
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.104.0-1`).
18
+ * It follows semantic versioning (e.g., `0.104.0-3`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/components",
3
- "version": "0.104.0-2",
3
+ "version": "0.104.0-4",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -30,7 +30,7 @@
30
30
  * @generated
31
31
  * @see https://github.com/webgptorg/promptbook
32
32
  */
33
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
33
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
34
34
  /**
35
35
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
36
36
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -7114,17 +7114,64 @@
7114
7114
  };
7115
7115
  }
7116
7116
  const lines = agentSource.split('\n');
7117
- const agentName = (((_a = lines[0]) === null || _a === void 0 ? void 0 : _a.trim()) || null);
7117
+ let agentName = null;
7118
+ let agentNameLineIndex = -1;
7119
+ // Find the agent name: first non-empty line that is not a commitment and not a horizontal line
7120
+ for (let i = 0; i < lines.length; i++) {
7121
+ const line = lines[i];
7122
+ if (line === undefined) {
7123
+ continue;
7124
+ }
7125
+ const trimmed = line.trim();
7126
+ if (!trimmed) {
7127
+ continue;
7128
+ }
7129
+ const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
7130
+ if (isHorizontal) {
7131
+ continue;
7132
+ }
7133
+ let isCommitment = false;
7134
+ for (const definition of COMMITMENT_REGISTRY) {
7135
+ const typeRegex = definition.createTypeRegex();
7136
+ const match = typeRegex.exec(trimmed);
7137
+ if (match && ((_a = match.groups) === null || _a === void 0 ? void 0 : _a.type)) {
7138
+ isCommitment = true;
7139
+ break;
7140
+ }
7141
+ }
7142
+ if (!isCommitment) {
7143
+ agentName = trimmed;
7144
+ agentNameLineIndex = i;
7145
+ break;
7146
+ }
7147
+ }
7118
7148
  const commitments = [];
7119
7149
  const nonCommitmentLines = [];
7120
- // Always add the first line (agent name) to non-commitment lines
7121
- if (lines[0] !== undefined) {
7122
- nonCommitmentLines.push(lines[0]);
7150
+ // Add lines before agentName that are horizontal lines (they are non-commitment)
7151
+ for (let i = 0; i < agentNameLineIndex; i++) {
7152
+ const line = lines[i];
7153
+ if (line === undefined) {
7154
+ continue;
7155
+ }
7156
+ const trimmed = line.trim();
7157
+ if (!trimmed) {
7158
+ continue;
7159
+ }
7160
+ const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
7161
+ if (isHorizontal) {
7162
+ nonCommitmentLines.push(line);
7163
+ }
7164
+ // Note: Commitments before agentName are not added to nonCommitmentLines
7165
+ }
7166
+ // Add the agent name line to non-commitment lines
7167
+ if (agentNameLineIndex >= 0) {
7168
+ nonCommitmentLines.push(lines[agentNameLineIndex]);
7123
7169
  }
7124
7170
  // Parse commitments with multiline support
7125
7171
  let currentCommitment = null;
7126
- // Process lines starting from the second line (skip agent name)
7127
- for (let i = 1; i < lines.length; i++) {
7172
+ // Process lines starting from after the agent name line
7173
+ const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
7174
+ for (let i = startIndex; i < lines.length; i++) {
7128
7175
  const line = lines[i];
7129
7176
  if (line === undefined) {
7130
7177
  continue;
@@ -8316,6 +8363,7 @@
8316
8363
  const parameterRegex = /@([a-zA-Z0-9_á-žÁ-Žč-řČ-Řš-žŠ-Žа-яА-ЯёЁ]+)/;
8317
8364
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
8318
8365
  const bookRules = [
8366
+ [/^---[-]*$/, ''],
8319
8367
  [parameterRegex, 'parameter'],
8320
8368
  [/\{[^}]+\}/, 'parameter'],
8321
8369
  [commitmentRegex, 'commitment'],
@@ -8324,7 +8372,12 @@
8324
8372
  const tokenProvider = monaco.languages.setMonarchTokensProvider(BOOK_LANGUAGE_ID, {
8325
8373
  ignoreCase: true,
8326
8374
  tokenizer: {
8327
- root: [[/^.*$/, 'title', '@body']],
8375
+ root: [
8376
+ [/^\s*$/, 'empty'],
8377
+ [/^-*$/, 'line'],
8378
+ [/^.*$/, 'title', '@body'],
8379
+ [commitmentRegex, 'commitment'],
8380
+ ],
8328
8381
  body: bookRules,
8329
8382
  },
8330
8383
  });
@@ -9250,15 +9303,15 @@
9250
9303
  ASSISTANT: '#ffb300',
9251
9304
  SYSTEM: '#888',
9252
9305
  };
9253
- const bgColor = participantColors[String(message.from)] || '#2b7cff';
9306
+ const bgColor = participantColors[String(message.sender)] || '#2b7cff';
9254
9307
  const textColor = getTextColor(bgColor);
9255
9308
  return spaceTrim__default["default"](`
9256
9309
  <div class="chat-message">
9257
9310
  <div class="avatar" style="background:${bgColor};color:${getTextColor(bgColor)};">
9258
- ${String(message.from)[0] || '?'}
9311
+ ${String(message.sender)[0] || '?'}
9259
9312
  </div>
9260
9313
  <div class="bubble" style="background:${bgColor};color:${textColor};">
9261
- <span class="from-label">${String(message.from)}:</span>
9314
+ <span class="from-label">${String(message.sender)}:</span>
9262
9315
  ${message.content}
9263
9316
  </div>
9264
9317
  </div>
@@ -9304,7 +9357,7 @@
9304
9357
  getContent: ({ messages }) => spaceTrim$1.spaceTrim(`
9305
9358
  ${messages
9306
9359
  .map((message) => spaceTrim$1.spaceTrim(`
9307
- **${message.from}:**
9360
+ **${message.sender}:**
9308
9361
 
9309
9362
  > ${message.content.replace(/\n/g, '\n> ')}
9310
9363
  `))
@@ -9586,7 +9639,7 @@
9586
9639
  console.info('participant avatarSrc', avatarSrc);
9587
9640
  console.info('participant color', { color, colorOfText });
9588
9641
  console.groupEnd();
9589
- }, children: [avatarSrc && (jsxRuntime.jsxs("div", { ref: avatarRef, className: chatStyles.avatar, onMouseEnter: handleMouseEnter, onMouseLeave: handleMouseLeave, onClick: showTooltip, children: [jsxRuntime.jsx("img", { width: AVATAR_SIZE, src: avatarSrc, alt: `Avatar of ${message.from.toString().toLocaleLowerCase()}`, style: {
9642
+ }, children: [avatarSrc && (jsxRuntime.jsxs("div", { ref: avatarRef, className: chatStyles.avatar, onMouseEnter: handleMouseEnter, onMouseLeave: handleMouseLeave, onClick: showTooltip, children: [jsxRuntime.jsx("img", { width: AVATAR_SIZE, src: avatarSrc, alt: `Avatar of ${message.sender.toString().toLocaleLowerCase()}`, style: {
9590
9643
  '--avatar-bg-color': color.toHex(),
9591
9644
  width: AVATAR_SIZE,
9592
9645
  } }), isAvatarTooltipVisible && (participant === null || participant === void 0 ? void 0 : participant.agentSource) && avatarTooltipPosition && (jsxRuntime.jsx(AvatarProfileTooltip, { ref: tooltipRef, agentSource: participant.agentSource, position: avatarTooltipPosition }))] })), jsxRuntime.jsxs("div", { className: chatStyles.messageText, style: {
@@ -9935,7 +9988,7 @@
9935
9988
  }
9936
9989
  }, [ratingModalOpen, isMobile]);
9937
9990
  // Determine alignment for actions (Reset button) based on the first message
9938
- const firstMessageFromUser = ((_a = messages[0]) === null || _a === void 0 ? void 0 : _a.from) === 'USER';
9991
+ const firstMessageFromUser = ((_a = messages[0]) === null || _a === void 0 ? void 0 : _a.sender) === 'USER';
9939
9992
  const actionsAlignmentClass = firstMessageFromUser
9940
9993
  ? chatStyles.actions + ' ' + chatStyles.left
9941
9994
  : chatStyles.actions + ' ' + chatStyles.right;
@@ -10012,7 +10065,7 @@
10012
10065
  }
10013
10066
  return true;
10014
10067
  })() && chatStyles.hasActionsAndFirstMessageIsLong), ref: chatMessagesRef, onScroll: handleScroll, children: [postprocessedMessages.map((message, i) => {
10015
- const participant = participants.find((participant) => participant.name === message.from);
10068
+ const participant = participants.find((participant) => participant.name === message.sender);
10016
10069
  const isLastMessage = i === postprocessedMessages.length - 1;
10017
10070
  const isExpanded = expandedMessageId === message.id;
10018
10071
  const currentRating = messageRatings.get(message.id || message.content /* <-[💃] */) || 0;
@@ -10104,13 +10157,13 @@
10104
10157
  const idx = postprocessedMessages.findIndex((m) => m.id === selectedMessage.id);
10105
10158
  if (idx > 0) {
10106
10159
  const prev = postprocessedMessages[idx - 1];
10107
- if (prev.from === 'USER') {
10160
+ if (prev.sender === 'USER') {
10108
10161
  return prev.content;
10109
10162
  }
10110
10163
  }
10111
10164
  // fallback: find last USER message before selectedMessage
10112
10165
  for (let i = messages.findIndex((m) => m.id === selectedMessage.id) - 1; i >= 0; i--) {
10113
- if (messages[i].from === 'USER') {
10166
+ if (messages[i].sender === 'USER') {
10114
10167
  return messages[i].content;
10115
10168
  }
10116
10169
  }
@@ -10137,7 +10190,9 @@
10137
10190
  try {
10138
10191
  const serializableMessages = messages.map((message) => ({
10139
10192
  ...message,
10140
- date: (message.date || new Date()).toISOString(),
10193
+ createdAt: (typeof message.createdAt === 'string'
10194
+ ? new Date(message.createdAt)
10195
+ : message.createdAt || new Date()).toISOString(),
10141
10196
  }));
10142
10197
  const storageKey = this.STORAGE_PREFIX + persistenceKey;
10143
10198
  localStorage.setItem(storageKey, JSON.stringify(serializableMessages));
@@ -10160,7 +10215,7 @@
10160
10215
  // Convert date strings back to Date objects
10161
10216
  return serializableMessages.map((message) => ({
10162
10217
  ...message,
10163
- date: new Date(message.date),
10218
+ createdAt: new Date(message.createdAt),
10164
10219
  }));
10165
10220
  }
10166
10221
  catch (error) {
@@ -10213,7 +10268,7 @@
10213
10268
  const { llmTools, persistenceKey, onChange, onReset, initialMessages, sendMessage, userParticipantName = 'USER', llmParticipantName = 'ASSISTANT', autoExecuteMessage, buttonColor, ...restProps } = props;
10214
10269
  // Internal state management
10215
10270
  // DRY: Single factory for seeding initial messages (used on mount and after reset)
10216
- const buildInitialMessages = react.useCallback(() => (initialMessages ? [...initialMessages] : []), [initialMessages]);
10271
+ const buildInitialMessages = react.useCallback(() => initialMessages ? ([...initialMessages]) : ([]), [initialMessages]);
10217
10272
  const [messages, setMessages] = react.useState(() => buildInitialMessages());
10218
10273
  const [tasksProgress, setTasksProgress] = react.useState([]);
10219
10274
  const [isVoiceCalling, setIsVoiceCalling] = react.useState(false);
@@ -10306,17 +10361,19 @@
10306
10361
  setTasksProgress([{ id: taskId, name: 'Playing response...', progress: 100 }]);
10307
10362
  const now = Date.now();
10308
10363
  const userMessage = {
10364
+ // channel: 'PROMPTBOOK_CHAT',
10309
10365
  id: `user_${now}`,
10310
- date: new Date(),
10311
- from: userParticipantName,
10366
+ createdAt: new Date(),
10367
+ sender: userParticipantName,
10312
10368
  content: (result.userMessage || '(Voice message)'),
10313
10369
  isComplete: true,
10314
10370
  isVoiceCall: true,
10315
10371
  };
10316
10372
  const agentMessage = {
10373
+ // channel: 'PROMPTBOOK_CHAT',
10317
10374
  id: `agent_${now}`,
10318
- date: new Date(),
10319
- from: llmParticipantName,
10375
+ createdAt: new Date(),
10376
+ sender: llmParticipantName,
10320
10377
  content: (result.agentMessage || result.text),
10321
10378
  isComplete: true,
10322
10379
  isVoiceCall: true,
@@ -10413,9 +10470,10 @@
10413
10470
  hasUserInteractedRef.current = true;
10414
10471
  // Add user message
10415
10472
  const userMessage = {
10473
+ // channel: 'PROMPTBOOK_CHAT',
10416
10474
  id: `user_${Date.now()}`,
10417
- date: new Date(),
10418
- from: userParticipantName,
10475
+ createdAt: new Date(),
10476
+ sender: userParticipantName,
10419
10477
  content: messageContent,
10420
10478
  isComplete: true,
10421
10479
  };
@@ -10427,9 +10485,10 @@
10427
10485
  }
10428
10486
  // Add loading message for assistant
10429
10487
  const loadingMessage = {
10488
+ // channel: 'PROMPTBOOK_CHAT',
10430
10489
  id: `assistant_${Date.now()}`,
10431
- date: new Date(),
10432
- from: llmParticipantName,
10490
+ createdAt: new Date(),
10491
+ sender: llmParticipantName,
10433
10492
  content: 'Thinking...',
10434
10493
  isComplete: false,
10435
10494
  };
@@ -10460,9 +10519,10 @@
10460
10519
  if (llmTools.callChatModelStream) {
10461
10520
  result = await llmTools.callChatModelStream(prompt, (chunk) => {
10462
10521
  const assistantMessage = {
10522
+ // channel: 'PROMPTBOOK_CHAT',
10463
10523
  id: loadingMessage.id,
10464
- date: new Date(),
10465
- from: llmParticipantName,
10524
+ createdAt: new Date(),
10525
+ sender: llmParticipantName,
10466
10526
  content: chunk.content,
10467
10527
  isComplete: false,
10468
10528
  };
@@ -10483,9 +10543,10 @@
10483
10543
  setTasksProgress([{ id: taskId, name: 'Response generated', progress: 100 }]);
10484
10544
  // Replace loading message with actual response
10485
10545
  const assistantMessage = {
10546
+ // channel: 'PROMPTBOOK_CHAT',
10486
10547
  id: loadingMessage.id,
10487
- date: new Date(),
10488
- from: llmParticipantName,
10548
+ createdAt: new Date(),
10549
+ sender: llmParticipantName,
10489
10550
  content: result.content,
10490
10551
  isComplete: true,
10491
10552
  };
@@ -10504,9 +10565,10 @@
10504
10565
  console.error('Error calling LLM:', error);
10505
10566
  // Replace loading message with error message
10506
10567
  const errorMessage = {
10568
+ // channel: 'PROMPTBOOK_CHAT',
10507
10569
  id: loadingMessage.id,
10508
- date: new Date(),
10509
- from: llmParticipantName,
10570
+ createdAt: new Date(),
10571
+ sender: llmParticipantName,
10510
10572
  content: `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}`,
10511
10573
  isComplete: true,
10512
10574
  };
@@ -10573,7 +10635,8 @@
10573
10635
  return (jsxRuntime.jsx(jsxRuntime.Fragment, { children: jsxRuntime.jsx(LlmChat, { title: title || `Chat with ${agent.meta.fullname || agent.agentName || 'Agent'}`, persistenceKey: persistenceKey || `agent-chat-${agent.agentName}`, userParticipantName: "USER", llmParticipantName: "AGENT" // <- TODO: [🧠] Maybe dynamic agent id
10574
10636
  , initialMessages: [
10575
10637
  {
10576
- from: 'AGENT',
10638
+ // channel: 'PROMPTBOOK_CHAT',
10639
+ sender: 'AGENT',
10577
10640
  content: agent.initialMessage ||
10578
10641
  spaceTrim__default["default"](`
10579
10642
 
@@ -10851,7 +10914,7 @@
10851
10914
  if (delays.longPauseChance &&
10852
10915
  Math.random() < delays.longPauseChance &&
10853
10916
  i > 0 &&
10854
- originalMessages[i].from !== originalMessages[i - 1].from) {
10917
+ originalMessages[i].sender !== originalMessages[i - 1].sender) {
10855
10918
  await waitasecond.forTime(getDelay(delays.longPauseDuration, 2000));
10856
10919
  didLongPause = true;
10857
10920
  if (isCancelled)
@@ -10872,9 +10935,10 @@
10872
10935
  }
10873
10936
  // Show incomplete message first (for typing effect)
10874
10937
  const incompleteMessage = {
10938
+ // channel: 'PROMPTBOOK_CHAT',
10875
10939
  id: currentMessage.id,
10876
- date: currentMessage.date,
10877
- from: currentMessage.from,
10940
+ createdAt: currentMessage.createdAt,
10941
+ sender: currentMessage.sender,
10878
10942
  content: '',
10879
10943
  isComplete: false,
10880
10944
  expectedAnswer: currentMessage.expectedAnswer,
@@ -10892,9 +10956,10 @@
10892
10956
  currentContent += (wordIndex > 0 ? ' ' : '') + word;
10893
10957
  // Update the message with current content
10894
10958
  const updatingMessage = {
10959
+ // channel: 'PROMPTBOOK_CHAT',
10895
10960
  id: currentMessage.id,
10896
- date: currentMessage.date,
10897
- from: currentMessage.from,
10961
+ createdAt: currentMessage.createdAt,
10962
+ sender: currentMessage.sender,
10898
10963
  content: currentContent,
10899
10964
  isComplete: false,
10900
10965
  expectedAnswer: currentMessage.expectedAnswer,
@@ -10912,9 +10977,10 @@
10912
10977
  }
10913
10978
  // Mark message as complete
10914
10979
  const completeMessage = {
10980
+ // channel: 'PROMPTBOOK_CHAT',
10915
10981
  id: currentMessage.id,
10916
- date: currentMessage.date,
10917
- from: currentMessage.from,
10982
+ createdAt: currentMessage.createdAt,
10983
+ sender: currentMessage.sender,
10918
10984
  content: currentMessage.content,
10919
10985
  isComplete: true,
10920
10986
  expectedAnswer: currentMessage.expectedAnswer,
@@ -11166,6 +11232,12 @@
11166
11232
  callEmbeddingModel(prompt) {
11167
11233
  return this.callCommonModel(prompt);
11168
11234
  }
11235
+ /**
11236
+ * Calls the best available image generation model
11237
+ */
11238
+ callImageGenerationModel(prompt) {
11239
+ return this.callCommonModel(prompt);
11240
+ }
11169
11241
  // <- Note: [🤖]
11170
11242
  /**
11171
11243
  * Calls the best available model
@@ -11192,6 +11264,11 @@
11192
11264
  continue llm;
11193
11265
  }
11194
11266
  return await llmExecutionTools.callEmbeddingModel(prompt);
11267
+ case 'IMAGE_GENERATION':
11268
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
11269
+ continue llm;
11270
+ }
11271
+ return await llmExecutionTools.callImageGenerationModel(prompt);
11195
11272
  // <- case [🤖]:
11196
11273
  default:
11197
11274
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -12366,6 +12443,15 @@
12366
12443
  return promptResult;
12367
12444
  };
12368
12445
  }
12446
+ if (llmTools.callImageGenerationModel !== undefined) {
12447
+ proxyTools.callImageGenerationModel = async (prompt) => {
12448
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
12449
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
12450
+ totalUsage = addUsage(totalUsage, promptResult.usage);
12451
+ spending.next(promptResult.usage);
12452
+ return promptResult;
12453
+ };
12454
+ }
12369
12455
  // <- Note: [🤖]
12370
12456
  return proxyTools;
12371
12457
  }
@@ -13909,8 +13995,9 @@
13909
13995
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
13910
13996
  break variant;
13911
13997
  case 'EMBEDDING':
13998
+ case 'IMAGE_GENERATION':
13912
13999
  throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
13913
- Embedding model can not be used in pipeline
14000
+ ${modelRequirements.modelVariant} model can not be used in pipeline
13914
14001
 
13915
14002
  This should be catched during parsing
13916
14003
 
@@ -16241,7 +16328,7 @@
16241
16328
  let threadMessages = [];
16242
16329
  if ('thread' in prompt && Array.isArray(prompt.thread)) {
16243
16330
  threadMessages = prompt.thread.map((msg) => ({
16244
- role: msg.role === 'assistant' ? 'assistant' : 'user',
16331
+ role: msg.sender === 'assistant' ? 'assistant' : 'user',
16245
16332
  content: msg.content,
16246
16333
  }));
16247
16334
  }