@promptbook/components 0.104.0-2 → 0.104.0-4
This diff shows the changes between publicly released versions of the package as they appear in their respective registries, and is provided for informational purposes only.
- package/esm/index.es.js +131 -44
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +7 -11
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +6 -2
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/types/Message.d.ts +49 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +131 -44
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -35,7 +35,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -7119,17 +7119,64 @@ function parseAgentSourceWithCommitments(agentSource) {
         };
     }
     const lines = agentSource.split('\n');
-
+    let agentName = null;
+    let agentNameLineIndex = -1;
+    // Find the agent name: first non-empty line that is not a commitment and not a horizontal line
+    for (let i = 0; i < lines.length; i++) {
+        const line = lines[i];
+        if (line === undefined) {
+            continue;
+        }
+        const trimmed = line.trim();
+        if (!trimmed) {
+            continue;
+        }
+        const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+        if (isHorizontal) {
+            continue;
+        }
+        let isCommitment = false;
+        for (const definition of COMMITMENT_REGISTRY) {
+            const typeRegex = definition.createTypeRegex();
+            const match = typeRegex.exec(trimmed);
+            if (match && ((_a = match.groups) === null || _a === void 0 ? void 0 : _a.type)) {
+                isCommitment = true;
+                break;
+            }
+        }
+        if (!isCommitment) {
+            agentName = trimmed;
+            agentNameLineIndex = i;
+            break;
+        }
+    }
     const commitments = [];
     const nonCommitmentLines = [];
-    //
-
-
+    // Add lines before agentName that are horizontal lines (they are non-commitment)
+    for (let i = 0; i < agentNameLineIndex; i++) {
+        const line = lines[i];
+        if (line === undefined) {
+            continue;
+        }
+        const trimmed = line.trim();
+        if (!trimmed) {
+            continue;
+        }
+        const isHorizontal = HORIZONTAL_LINE_PATTERN.test(line);
+        if (isHorizontal) {
+            nonCommitmentLines.push(line);
+        }
+        // Note: Commitments before agentName are not added to nonCommitmentLines
+    }
+    // Add the agent name line to non-commitment lines
+    if (agentNameLineIndex >= 0) {
+        nonCommitmentLines.push(lines[agentNameLineIndex]);
     }
     // Parse commitments with multiline support
     let currentCommitment = null;
-    // Process lines starting from the
-
+    // Process lines starting from after the agent name line
+    const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
+    for (let i = startIndex; i < lines.length; i++) {
         const line = lines[i];
         if (line === undefined) {
             continue;
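The block above changes how `parseAgentSourceWithCommitments` picks the agent name: instead of taking a fixed line, it scans for the first non-empty line that is neither a horizontal rule nor a commitment, and only horizontal rules before that line are kept among the non-commitment lines. The following standalone sketch mirrors that scan; `HORIZONTAL_LINE_PATTERN` and the keyword list are stand-ins for the library's internals (`COMMITMENT_REGISTRY`), not its actual exports.

```ts
// Sketch of the agent-name scan introduced above (not the library's API).
// Assumptions: a horizontal rule is a line of dashes, and commitments start
// with an upper-case keyword such as PERSONA or KNOWLEDGE.
const HORIZONTAL_LINE_PATTERN = /^\s*-{3,}\s*$/;
const COMMITMENT_KEYWORDS = ['PERSONA', 'KNOWLEDGE', 'RULE', 'STYLE']; // <- illustrative subset

function findAgentName(agentSource: string): { agentName: string | null; agentNameLineIndex: number } {
    const lines = agentSource.split('\n');
    for (let i = 0; i < lines.length; i++) {
        const line = lines[i]!;
        const trimmed = line.trim();
        if (!trimmed) {
            continue; // <- skip blank lines
        }
        if (HORIZONTAL_LINE_PATTERN.test(line)) {
            continue; // <- skip horizontal rules
        }
        const isCommitment = COMMITMENT_KEYWORDS.some((keyword) =>
            new RegExp(`^${keyword}\\b`, 'i').test(trimmed),
        );
        if (!isCommitment) {
            return { agentName: trimmed, agentNameLineIndex: i };
        }
    }
    return { agentName: null, agentNameLineIndex: -1 };
}

// Example: the name no longer has to be the very first line
console.log(findAgentName('---\n\nAlice the Analyst\nPERSONA An analyst'));
// -> { agentName: 'Alice the Analyst', agentNameLineIndex: 2 }
```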
@@ -8321,6 +8368,7 @@ function BookEditorMonaco(props) {
         const parameterRegex = /@([a-zA-Z0-9_á-žÁ-Žč-řČ-Řš-žŠ-Žа-яА-ЯёЁ]+)/;
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
         const bookRules = [
+            [/^---[-]*$/, ''],
             [parameterRegex, 'parameter'],
             [/\{[^}]+\}/, 'parameter'],
             [commitmentRegex, 'commitment'],
@@ -8329,7 +8377,12 @@ function BookEditorMonaco(props) {
         const tokenProvider = monaco.languages.setMonarchTokensProvider(BOOK_LANGUAGE_ID, {
             ignoreCase: true,
             tokenizer: {
-                root: [
+                root: [
+                    [/^\s*$/, 'empty'],
+                    [/^-*$/, 'line'],
+                    [/^.*$/, 'title', '@body'],
+                    [commitmentRegex, 'commitment'],
+                ],
                 body: bookRules,
             },
         });
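The new `root` state above tokenizes blank lines, dash-only rules, and the title line separately, and switches to the `body` state (via `'@body'`) once the title has been consumed, so the rest of the book is tokenized with `bookRules`. A rough sketch of that two-state Monarch setup follows, using a made-up language id, placeholder token names, and an illustrative commitment regex rather than the component's real configuration.

```ts
import * as monaco from 'monaco-editor';

// Sketch of the two-state tokenizer shape used above (placeholder rules,
// not the actual BookEditorMonaco configuration).
const commitmentRegex = /^(PERSONA|KNOWLEDGE|RULE|STYLE)\b/; // <- illustrative
monaco.languages.register({ id: 'book-sketch' });
monaco.languages.setMonarchTokensProvider('book-sketch', {
    ignoreCase: true,
    tokenizer: {
        root: [
            [/^\s*$/, 'empty'],         // blank lines before the title
            [/^-*$/, 'line'],           // horizontal rules
            [/^.*$/, 'title', '@body'], // first real line is the title, then switch state
            [commitmentRegex, 'commitment'],
        ],
        body: [
            [commitmentRegex, 'commitment'],
            [/@[a-zA-Z0-9_]+/, 'parameter'],
        ],
    },
});
```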
@@ -9255,15 +9308,15 @@ const htmlSaveFormatDefinition = {
             ASSISTANT: '#ffb300',
             SYSTEM: '#888',
         };
-        const bgColor = participantColors[String(message.
+        const bgColor = participantColors[String(message.sender)] || '#2b7cff';
         const textColor = getTextColor(bgColor);
         return spaceTrim$2(`
             <div class="chat-message">
                 <div class="avatar" style="background:${bgColor};color:${getTextColor(bgColor)};">
-                    ${String(message.
+                    ${String(message.sender)[0] || '?'}
                 </div>
                 <div class="bubble" style="background:${bgColor};color:${textColor};">
-                    <span class="from-label">${String(message.
+                    <span class="from-label">${String(message.sender)}:</span>
                     ${message.content}
                 </div>
             </div>
@@ -9309,7 +9362,7 @@ const mdSaveFormatDefinition = {
     getContent: ({ messages }) => spaceTrim$1(`
         ${messages
             .map((message) => spaceTrim$1(`
-                **${message.
+                **${message.sender}:**
 
                 > ${message.content.replace(/\n/g, '\n> ')}
             `))
@@ -9591,7 +9644,7 @@ const ChatMessageItem = memo(({ message, participant, participants, isLastMessage
         console.info('participant avatarSrc', avatarSrc);
         console.info('participant color', { color, colorOfText });
         console.groupEnd();
-    }, children: [avatarSrc && (jsxs("div", { ref: avatarRef, className: chatStyles.avatar, onMouseEnter: handleMouseEnter, onMouseLeave: handleMouseLeave, onClick: showTooltip, children: [jsx("img", { width: AVATAR_SIZE, src: avatarSrc, alt: `Avatar of ${message.
+    }, children: [avatarSrc && (jsxs("div", { ref: avatarRef, className: chatStyles.avatar, onMouseEnter: handleMouseEnter, onMouseLeave: handleMouseLeave, onClick: showTooltip, children: [jsx("img", { width: AVATAR_SIZE, src: avatarSrc, alt: `Avatar of ${message.sender.toString().toLocaleLowerCase()}`, style: {
             '--avatar-bg-color': color.toHex(),
             width: AVATAR_SIZE,
         } }), isAvatarTooltipVisible && (participant === null || participant === void 0 ? void 0 : participant.agentSource) && avatarTooltipPosition && (jsx(AvatarProfileTooltip, { ref: tooltipRef, agentSource: participant.agentSource, position: avatarTooltipPosition }))] })), jsxs("div", { className: chatStyles.messageText, style: {
@@ -9940,7 +9993,7 @@ function Chat(props) {
         }
     }, [ratingModalOpen, isMobile]);
     // Determine alignment for actions (Reset button) based on the first message
-    const firstMessageFromUser = ((_a = messages[0]) === null || _a === void 0 ? void 0 : _a.
+    const firstMessageFromUser = ((_a = messages[0]) === null || _a === void 0 ? void 0 : _a.sender) === 'USER';
     const actionsAlignmentClass = firstMessageFromUser
         ? chatStyles.actions + ' ' + chatStyles.left
         : chatStyles.actions + ' ' + chatStyles.right;
@@ -10017,7 +10070,7 @@ function Chat(props) {
                 }
                 return true;
             })() && chatStyles.hasActionsAndFirstMessageIsLong), ref: chatMessagesRef, onScroll: handleScroll, children: [postprocessedMessages.map((message, i) => {
-                const participant = participants.find((participant) => participant.name === message.
+                const participant = participants.find((participant) => participant.name === message.sender);
                 const isLastMessage = i === postprocessedMessages.length - 1;
                 const isExpanded = expandedMessageId === message.id;
                 const currentRating = messageRatings.get(message.id || message.content /* <-[💃] */) || 0;
@@ -10109,13 +10162,13 @@ function Chat(props) {
         const idx = postprocessedMessages.findIndex((m) => m.id === selectedMessage.id);
         if (idx > 0) {
             const prev = postprocessedMessages[idx - 1];
-            if (prev.
+            if (prev.sender === 'USER') {
                 return prev.content;
             }
         }
         // fallback: find last USER message before selectedMessage
         for (let i = messages.findIndex((m) => m.id === selectedMessage.id) - 1; i >= 0; i--) {
-            if (messages[i].
+            if (messages[i].sender === 'USER') {
                 return messages[i].content;
             }
         }
@@ -10142,7 +10195,9 @@ class ChatPersistence {
         try {
             const serializableMessages = messages.map((message) => ({
                 ...message,
-
+                createdAt: (typeof message.createdAt === 'string'
+                    ? new Date(message.createdAt)
+                    : message.createdAt || new Date()).toISOString(),
             }));
             const storageKey = this.STORAGE_PREFIX + persistenceKey;
             localStorage.setItem(storageKey, JSON.stringify(serializableMessages));
@@ -10165,7 +10220,7 @@ class ChatPersistence {
             // Convert date strings back to Date objects
             return serializableMessages.map((message) => ({
                 ...message,
-
+                createdAt: new Date(message.createdAt),
             }));
         }
         catch (error) {
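The two `ChatPersistence` hunks above normalize `createdAt` when saving (accepting either a `Date` or an already-serialized ISO string and defaulting to `new Date()`) and revive it back into a `Date` when loading. A small sketch of that round-trip, independent of the `ChatPersistence` class; the message types and storage key are simplified for the example.

```ts
// Sketch of the createdAt round-trip through localStorage (field names taken
// from the diff above; the types and key are made up for the example).
type StoredMessage = { id: string; sender: string; content: string; createdAt: string };
type LiveMessage = { id: string; sender: string; content: string; createdAt?: Date | string };

function saveMessages(key: string, messages: LiveMessage[]): void {
    const serializable: StoredMessage[] = messages.map((message) => ({
        ...message,
        // Accept a Date, an already-serialized string, or nothing at all
        createdAt: (typeof message.createdAt === 'string'
            ? new Date(message.createdAt)
            : message.createdAt || new Date()
        ).toISOString(),
    }));
    localStorage.setItem(key, JSON.stringify(serializable));
}

function loadMessages(key: string): LiveMessage[] {
    const raw = localStorage.getItem(key);
    if (!raw) {
        return [];
    }
    // Revive the ISO strings back into Date objects
    return (JSON.parse(raw) as StoredMessage[]).map((message) => ({
        ...message,
        createdAt: new Date(message.createdAt),
    }));
}
```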
@@ -10218,7 +10273,7 @@ function LlmChat(props) {
     const { llmTools, persistenceKey, onChange, onReset, initialMessages, sendMessage, userParticipantName = 'USER', llmParticipantName = 'ASSISTANT', autoExecuteMessage, buttonColor, ...restProps } = props;
     // Internal state management
     // DRY: Single factory for seeding initial messages (used on mount and after reset)
-    const buildInitialMessages = useCallback(() =>
+    const buildInitialMessages = useCallback(() => initialMessages ? ([...initialMessages]) : ([]), [initialMessages]);
     const [messages, setMessages] = useState(() => buildInitialMessages());
     const [tasksProgress, setTasksProgress] = useState([]);
     const [isVoiceCalling, setIsVoiceCalling] = useState(false);
@@ -10311,17 +10366,19 @@ function LlmChat(props) {
             setTasksProgress([{ id: taskId, name: 'Playing response...', progress: 100 }]);
             const now = Date.now();
             const userMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: `user_${now}`,
-
-
+                createdAt: new Date(),
+                sender: userParticipantName,
                 content: (result.userMessage || '(Voice message)'),
                 isComplete: true,
                 isVoiceCall: true,
             };
             const agentMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: `agent_${now}`,
-
-
+                createdAt: new Date(),
+                sender: llmParticipantName,
                 content: (result.agentMessage || result.text),
                 isComplete: true,
                 isVoiceCall: true,
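Throughout the `LlmChat`, `AgentChat`, and `MockedChat` hunks, messages are now built with a `sender` field, an explicit `createdAt: Date`, and a commented-out `channel` field reserved for later. Based only on the fields visible in this diff (the full contract lives in the updated `ChatMessage.d.ts` and the new `Message.d.ts` typings, which are not shown here), a message literal now looks roughly like this:

```ts
// Shape inferred from the diff above; the real type is exported from the
// package typings (ChatMessage / Message) and may carry more fields.
type ChatMessageSketch = {
    id: string;
    createdAt: Date;
    sender: string;          // e.g. 'USER', 'ASSISTANT', 'AGENT' or a participant name
    content: string;
    isComplete: boolean;
    isVoiceCall?: boolean;
};

const userMessage: ChatMessageSketch = {
    // channel: 'PROMPTBOOK_CHAT', // <- still commented out in the shipped code
    id: `user_${Date.now()}`,
    createdAt: new Date(),
    sender: 'USER',
    content: 'Hello!',
    isComplete: true,
};
```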
@@ -10418,9 +10475,10 @@ function LlmChat(props) {
         hasUserInteractedRef.current = true;
         // Add user message
         const userMessage = {
+            // channel: 'PROMPTBOOK_CHAT',
             id: `user_${Date.now()}`,
-
-
+            createdAt: new Date(),
+            sender: userParticipantName,
             content: messageContent,
             isComplete: true,
         };
@@ -10432,9 +10490,10 @@ function LlmChat(props) {
         }
         // Add loading message for assistant
         const loadingMessage = {
+            // channel: 'PROMPTBOOK_CHAT',
             id: `assistant_${Date.now()}`,
-
-
+            createdAt: new Date(),
+            sender: llmParticipantName,
             content: 'Thinking...',
             isComplete: false,
         };
@@ -10465,9 +10524,10 @@ function LlmChat(props) {
             if (llmTools.callChatModelStream) {
                 result = await llmTools.callChatModelStream(prompt, (chunk) => {
                     const assistantMessage = {
+                        // channel: 'PROMPTBOOK_CHAT',
                         id: loadingMessage.id,
-
-
+                        createdAt: new Date(),
+                        sender: llmParticipantName,
                         content: chunk.content,
                         isComplete: false,
                     };
@@ -10488,9 +10548,10 @@ function LlmChat(props) {
             setTasksProgress([{ id: taskId, name: 'Response generated', progress: 100 }]);
             // Replace loading message with actual response
             const assistantMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: loadingMessage.id,
-
-
+                createdAt: new Date(),
+                sender: llmParticipantName,
                 content: result.content,
                 isComplete: true,
             };
@@ -10509,9 +10570,10 @@ function LlmChat(props) {
             console.error('Error calling LLM:', error);
             // Replace loading message with error message
             const errorMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: loadingMessage.id,
-
-
+                createdAt: new Date(),
+                sender: llmParticipantName,
                 content: `Sorry, I encountered an error: ${error instanceof Error ? error.message : 'Unknown error'}`,
                 isComplete: true,
             };
@@ -10578,7 +10640,8 @@ function AgentChat(props) {
     return (jsx(Fragment, { children: jsx(LlmChat, { title: title || `Chat with ${agent.meta.fullname || agent.agentName || 'Agent'}`, persistenceKey: persistenceKey || `agent-chat-${agent.agentName}`, userParticipantName: "USER", llmParticipantName: "AGENT" // <- TODO: [🧠] Maybe dynamic agent id
         , initialMessages: [
             {
-
+                // channel: 'PROMPTBOOK_CHAT',
+                sender: 'AGENT',
                 content: agent.initialMessage ||
                     spaceTrim$2(`
 
@@ -10856,7 +10919,7 @@ function MockedChat(props) {
             if (delays.longPauseChance &&
                 Math.random() < delays.longPauseChance &&
                 i > 0 &&
-                originalMessages[i].
+                originalMessages[i].sender !== originalMessages[i - 1].sender) {
                 await forTime(getDelay(delays.longPauseDuration, 2000));
                 didLongPause = true;
                 if (isCancelled)
@@ -10877,9 +10940,10 @@ function MockedChat(props) {
             }
             // Show incomplete message first (for typing effect)
             const incompleteMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: currentMessage.id,
-
-
+                createdAt: currentMessage.createdAt,
+                sender: currentMessage.sender,
                 content: '',
                 isComplete: false,
                 expectedAnswer: currentMessage.expectedAnswer,
@@ -10897,9 +10961,10 @@ function MockedChat(props) {
                 currentContent += (wordIndex > 0 ? ' ' : '') + word;
                 // Update the message with current content
                 const updatingMessage = {
+                    // channel: 'PROMPTBOOK_CHAT',
                     id: currentMessage.id,
-
-
+                    createdAt: currentMessage.createdAt,
+                    sender: currentMessage.sender,
                     content: currentContent,
                     isComplete: false,
                     expectedAnswer: currentMessage.expectedAnswer,
@@ -10917,9 +10982,10 @@ function MockedChat(props) {
             }
             // Mark message as complete
             const completeMessage = {
+                // channel: 'PROMPTBOOK_CHAT',
                 id: currentMessage.id,
-
-
+                createdAt: currentMessage.createdAt,
+                sender: currentMessage.sender,
                 content: currentMessage.content,
                 isComplete: true,
                 expectedAnswer: currentMessage.expectedAnswer,
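The three `MockedChat` hunks above carry `createdAt` and `sender` through each stage of the simulated typing effect: an empty incomplete message, word-by-word updates, then the complete message. A compact sketch of that progression follows; the `onMessage` callback and the local delay helper are invented for the example (the real component pauses with its own `forTime` helper and updates React state instead).

```ts
// Sketch of the word-by-word typing simulation used by MockedChat
// (callback name and delay are made up; the real component updates React state).
type SimMessage = { id: string; createdAt: Date; sender: string; content: string; isComplete: boolean };

const forTime = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function typeOutMessage(
    message: SimMessage,
    onMessage: (message: SimMessage) => void,
    wordDelayMs = 50,
): Promise<void> {
    // 1) Show an empty, incomplete message first
    onMessage({ ...message, content: '', isComplete: false });

    // 2) Reveal the content word by word
    const words = message.content.split(' ');
    let currentContent = '';
    for (const [wordIndex, word] of words.entries()) {
        currentContent += (wordIndex > 0 ? ' ' : '') + word;
        onMessage({ ...message, content: currentContent, isComplete: false });
        await forTime(wordDelayMs);
    }

    // 3) Mark the message as complete
    onMessage({ ...message, isComplete: true });
}
```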
@@ -11171,6 +11237,12 @@ class MultipleLlmExecutionTools {
     callEmbeddingModel(prompt) {
         return this.callCommonModel(prompt);
     }
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
     // <- Note: [🤖]
     /**
      * Calls the best available model
@@ -11197,6 +11269,11 @@ class MultipleLlmExecutionTools {
                         continue llm;
                     }
                     return await llmExecutionTools.callEmbeddingModel(prompt);
+                case 'IMAGE_GENERATION':
+                    if (llmExecutionTools.callImageGenerationModel === undefined) {
+                        continue llm;
+                    }
+                    return await llmExecutionTools.callImageGenerationModel(prompt);
                 // <- case [🤖]:
                 default:
                     throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
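With the two `MultipleLlmExecutionTools` hunks above, `IMAGE_GENERATION` prompts are routed the same way as chat and embedding prompts: the wrapper walks its inner tools and uses the first one that implements `callImageGenerationModel`, falling through (the labeled `continue llm`) otherwise. A hedged sketch of that routing; only the `'IMAGE_GENERATION'` variant and the optional `callImageGenerationModel` method come from the diff, the interfaces and names below are invented for illustration.

```ts
// Hypothetical routing sketch (not the package's API).
type ImagePrompt = { modelRequirements: { modelVariant: 'IMAGE_GENERATION' }; content: string };
type ImageResult = { content: string; usage: unknown };

interface LlmToolsSketch {
    title: string;
    callImageGenerationModel?(prompt: ImagePrompt): Promise<ImageResult>;
}

async function routeImagePrompt(tools: LlmToolsSketch[], prompt: ImagePrompt): Promise<ImageResult> {
    for (const llmExecutionTools of tools) {
        if (llmExecutionTools.callImageGenerationModel === undefined) {
            continue; // <- same fall-through as `continue llm` above
        }
        return await llmExecutionTools.callImageGenerationModel(prompt);
    }
    throw new Error(`No tool supports model variant "${prompt.modelRequirements.modelVariant}"`);
}
```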
@@ -12371,6 +12448,15 @@ function countUsage(llmTools) {
             return promptResult;
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
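The `countUsage` hunk above extends the usage-counting proxy with the same wrap-and-accumulate pattern the other model calls already use: delegate to the inner tool, add the returned `usage` to a running total, and push it to the `spending` observer. A stripped-down sketch of that pattern, with a plain callback standing in for the real `spending` subject and a simplified `addUsage`:

```ts
// Sketch of the wrap-and-accumulate pattern from countUsage above
// (Usage, addUsage, and the spending callback are simplified stand-ins).
type Usage = { price: number };
type ToolResult = { content: string; usage: Usage };
type ImageTool = { callImageGenerationModel?(prompt: string): Promise<ToolResult> };

function withUsageCounting(llmTools: ImageTool, onSpending: (usage: Usage) => void) {
    let totalUsage: Usage = { price: 0 };
    const proxyTools: ImageTool = { ...llmTools };
    if (llmTools.callImageGenerationModel !== undefined) {
        proxyTools.callImageGenerationModel = async (prompt) => {
            const promptResult = await llmTools.callImageGenerationModel!(prompt);
            totalUsage = { price: totalUsage.price + promptResult.usage.price }; // <- stands in for addUsage()
            onSpending(promptResult.usage);                                      // <- stands in for spending.next()
            return promptResult;
        };
    }
    return { proxyTools, getTotalUsage: () => totalUsage };
}
```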
@@ -13914,8 +14000,9 @@ async function executeAttempts(options) {
                     $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
                     break variant;
                 case 'EMBEDDING':
+                case 'IMAGE_GENERATION':
                     throw new PipelineExecutionError(spaceTrim$1((block) => `
-
+                        ${modelRequirements.modelVariant} model can not be used in pipeline
 
                         This should be catched during parsing
 
@@ -16246,7 +16333,7 @@ class OpenAiCompatibleExecutionTools {
         let threadMessages = [];
         if ('thread' in prompt && Array.isArray(prompt.thread)) {
             threadMessages = prompt.thread.map((msg) => ({
-                role: msg.
+                role: msg.sender === 'assistant' ? 'assistant' : 'user',
                 content: msg.content,
             }));
         }