@lobehub/lobehub 2.0.0-next.115 → 2.0.0-next.117
This diff shows the changes between publicly released versions of this package as they appear in its public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/packages/context-engine/src/processors/MessageContent.ts +100 -6
- package/packages/context-engine/src/processors/__tests__/MessageContent.test.ts +239 -0
- package/packages/fetch-sse/src/fetchSSE.ts +30 -0
- package/packages/model-bank/src/aiModels/bedrock.ts +30 -2
- package/packages/model-runtime/src/const/models.ts +62 -24
- package/packages/model-runtime/src/core/contextBuilders/anthropic.ts +14 -0
- package/packages/model-runtime/src/core/contextBuilders/google.test.ts +78 -24
- package/packages/model-runtime/src/core/contextBuilders/google.ts +10 -2
- package/packages/model-runtime/src/core/parameterResolver.test.ts +34 -50
- package/packages/model-runtime/src/core/parameterResolver.ts +0 -41
- package/packages/model-runtime/src/core/streams/google/google-ai.test.ts +451 -20
- package/packages/model-runtime/src/core/streams/google/index.ts +113 -3
- package/packages/model-runtime/src/core/streams/protocol.ts +19 -0
- package/packages/model-runtime/src/index.ts +1 -0
- package/packages/model-runtime/src/providers/anthropic/index.ts +20 -32
- package/packages/model-runtime/src/providers/anthropic/resolveMaxTokens.ts +35 -0
- package/packages/model-runtime/src/providers/bedrock/index.test.ts +5 -7
- package/packages/model-runtime/src/providers/bedrock/index.ts +50 -11
- package/packages/types/src/message/common/base.ts +26 -0
- package/packages/types/src/message/common/metadata.ts +7 -0
- package/packages/utils/src/index.ts +1 -0
- package/packages/utils/src/multimodalContent.ts +25 -0
- package/src/components/Thinking/index.tsx +3 -3
- package/src/features/ChatList/Messages/Assistant/DisplayContent.tsx +44 -0
- package/src/features/ChatList/Messages/Assistant/MessageBody.tsx +96 -0
- package/src/features/ChatList/Messages/Assistant/Reasoning/index.tsx +26 -13
- package/src/features/ChatList/Messages/Assistant/index.tsx +8 -6
- package/src/features/ChatList/Messages/Default.tsx +4 -7
- package/src/features/ChatList/components/RichContentRenderer.tsx +35 -0
- package/src/store/agent/slices/chat/selectors/chatConfig.ts +4 -3
- package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +244 -17
- package/packages/const/src/models.ts +0 -93
- package/src/features/ChatList/Messages/Assistant/MessageContent.tsx +0 -78
--- a/package/src/features/ChatList/Messages/Assistant/Reasoning/index.tsx
+++ b/package/src/features/ChatList/Messages/Assistant/Reasoning/index.tsx
@@ -1,3 +1,5 @@
+import { MessageContentPart } from '@lobechat/types';
+import { deserializeParts } from '@lobechat/utils';
 import { memo } from 'react';

 import Thinking from '@/components/Thinking';
@@ -6,24 +8,35 @@ import { aiChatSelectors } from '@/store/chat/selectors';
 import { useUserStore } from '@/store/user';
 import { userGeneralSettingsSelectors } from '@/store/user/selectors';

+import { RichContentRenderer } from '../../../components/RichContentRenderer';
+
 interface ReasoningProps {
   content?: string;
   duration?: number;
   id: string;
+  isMultimodal?: boolean;
+  tempDisplayContent?: MessageContentPart[];
 }

-const Reasoning = memo<ReasoningProps>(
-
-
-
-
-
-
-
-
-
-
-}
+const Reasoning = memo<ReasoningProps>(
+  ({ content = '', duration, id, isMultimodal, tempDisplayContent }) => {
+    const isReasoning = useChatStore(aiChatSelectors.isMessageInReasoning(id));
+    const transitionMode = useUserStore(userGeneralSettingsSelectors.transitionMode);
+
+    const parts = tempDisplayContent || deserializeParts(content);
+
+    // If parts are provided, render multimodal content
+    const thinkingContent = isMultimodal && parts ? <RichContentRenderer parts={parts} /> : content;
+
+    return (
+      <Thinking
+        content={thinkingContent}
+        duration={duration}
+        thinking={isReasoning}
+        thinkingAnimated={transitionMode === 'fadeIn' && isReasoning}
+      />
+    );
+  },
+);

 export default Reasoning;
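The `isMultimodal` branch above leans on `deserializeParts` from `@lobechat/utils` (added in `package/packages/utils/src/multimodalContent.ts`, +25 lines, not expanded in this diff) to recover `MessageContentPart[]` from stored message content. A minimal sketch of what such a serialize/deserialize pair could look like, assuming parts are persisted as a prefixed JSON array; the prefix and format here are assumptions, not the library's actual scheme:

```ts
import { MessageContentPart } from '@lobechat/types';

// Hypothetical marker: the real storage format used by serializePartsForStorage
// (packages/utils/src/multimodalContent.ts) is not shown in this diff.
const PARTS_PREFIX = '<!--lobe-multimodal-->';

export const serializePartsForStorage = (parts: MessageContentPart[]): string =>
  PARTS_PREFIX + JSON.stringify(parts);

export const deserializeParts = (content: string): MessageContentPart[] | undefined => {
  if (!content.startsWith(PARTS_PREFIX)) return undefined;
  try {
    return JSON.parse(content.slice(PARTS_PREFIX.length)) as MessageContentPart[];
  } catch {
    return undefined;
  }
};
```

Returning `undefined` for plain-text content keeps `tempDisplayContent || deserializeParts(content)` falling back to the raw string in the component above.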
--- a/package/src/features/ChatList/Messages/Assistant/index.tsx
+++ b/package/src/features/ChatList/Messages/Assistant/index.tsx
@@ -33,7 +33,7 @@ import { useDoubleClickEdit } from '../../hooks/useDoubleClickEdit';
 import { normalizeThinkTags, processWithArtifact } from '../../utils/markdown';
 import { AssistantActionsBar } from './Actions';
 import { AssistantMessageExtra } from './Extra';
-import {
+import { AssistantMessageBody } from './MessageBody';

 const rehypePlugins = markdownElements.map((element) => element.rehypePlugin).filter(Boolean);
 const remarkPlugins = markdownElements.map((element) => element.remarkPlugin).filter(Boolean);
@@ -75,7 +75,7 @@ export const useStyles = createStyles(
       justify-content: ${placement === 'left' ? 'flex-end' : 'flex-start'};
     `,
     editing &&
-
+      css`
         pointer-events: none !important;
         opacity: 0 !important;
       `,
@@ -84,11 +84,9 @@ export const useStyles = createStyles(
     variant === 'docs' && rawContainerStylish,
     css`
       position: relative;
-
       width: 100%;
       max-width: 100vw;
       padding-block: 24px 12px;
-      padding-inline: 12px;

       @supports (content-visibility: auto) {
         contain-intrinsic-size: auto 100lvh;
@@ -305,9 +303,13 @@ const AssistantMessage = memo<AssistantMessageProps>(

     const renderMessage = useCallback(
       (editableContent: ReactNode) => (
-        <
+        <AssistantMessageBody
+          {...item}
+          editableContent={editableContent}
+          markdownProps={markdownProps}
+        />
       ),
-      [item],
+      [item, markdownProps],
     );
     const errorMessage = <ErrorMessageExtra data={item} />;

--- a/package/src/features/ChatList/Messages/Default.tsx
+++ b/package/src/features/ChatList/Messages/Default.tsx
@@ -6,25 +6,22 @@ import { LOADING_FLAT } from '@/const/message';
 import { useChatStore } from '@/store/chat';
 import { messageStateSelectors } from '@/store/chat/selectors';

-export const MessageContentClassName = 'msg_content_flag'
+export const MessageContentClassName = 'msg_content_flag';

 export const DefaultMessage = memo<
   UIChatMessage & {
     addIdOnDOM?: boolean;
     editableContent: ReactNode;
+    hasImages?: boolean;
     isToolCallGenerating?: boolean;
   }
->(({ id, editableContent, content, isToolCallGenerating, addIdOnDOM = true }) => {
+>(({ id, editableContent, content, isToolCallGenerating, addIdOnDOM = true, hasImages }) => {
   const editing = useChatStore(messageStateSelectors.isMessageEditing(id));

   if (isToolCallGenerating) return;

-  if (!content) return <BubblesLoading />;
+  if (!content && !hasImages) return <BubblesLoading />;
   if (content === LOADING_FLAT && !editing) return <BubblesLoading />;

   return <div id={addIdOnDOM ? id : undefined}>{editableContent}</div>;
 });
-
-export const DefaultBelowMessage = memo<UIChatMessage>(() => {
-  return null;
-});
--- /dev/null
+++ b/package/src/features/ChatList/components/RichContentRenderer.tsx
@@ -0,0 +1,35 @@
+import { Image, Markdown } from '@lobehub/ui';
+import { memo } from 'react';
+import { Flexbox } from 'react-layout-kit';
+
+import { MessageContentPart } from '@/types/index';
+
+interface RichContentRendererProps {
+  parts: MessageContentPart[];
+}
+
+export const RichContentRenderer = memo<RichContentRendererProps>(({ parts }) => {
+  return (
+    <Flexbox gap={8}>
+      {parts.map((part, index) => {
+        if (part.type === 'text') {
+          return (
+            <Markdown key={index} variant="chat">
+              {part.text}
+            </Markdown>
+          );
+        }
+
+        if (part.type === 'image') {
+          return (
+            <Image key={index} src={part.image} style={{ borderRadius: 8, maxWidth: '100%' }} />
+          );
+        }
+
+        return null;
+      })}
+    </Flexbox>
+  );
+});
+
+RichContentRenderer.displayName = 'RichContentRenderer';
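Usage is straightforward: the component takes an ordered `parts` array and renders text parts as chat Markdown and image parts as `<Image>` blocks. An illustrative invocation (the part values here are made up):

```tsx
import { MessageContentPart } from '@lobechat/types';

import { RichContentRenderer } from '@/features/ChatList/components/RichContentRenderer';

// Illustrative parts; in the app they come from tempDisplayContent or deserializeParts(content).
const parts: MessageContentPart[] = [
  { text: 'Here is the generated chart:', type: 'text' },
  { image: 'https://example.com/chart.png', type: 'image' },
  { text: 'The trend is upward.', type: 'text' },
];

export const Demo = () => <RichContentRenderer parts={parts} />;
```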
--- a/package/src/store/agent/slices/chat/selectors/chatConfig.ts
+++ b/package/src/store/agent/slices/chat/selectors/chatConfig.ts
@@ -1,4 +1,5 @@
-import {
+import { isContextCachingModel, isThinkingWithToolClaudeModel } from '@lobechat/model-runtime';
+
 import { DEFAULT_AGENT_CHAT_CONFIG, DEFAULT_AGENT_SEARCH_FC_MODEL } from '@/const/settings';
 import { AgentStoreState } from '@/store/agent/initialState';
 import { LobeAgentChatConfig } from '@/types/agent';
@@ -24,12 +25,12 @@ const enableHistoryCount = (s: AgentStoreState) => {
   // If context caching is enabled and the current model supports it, do not enable history count
   const enableContextCaching = !chatConfig.disableContextCaching;

-  if (enableContextCaching &&
+  if (enableContextCaching && isContextCachingModel(config.model)) return false;

   // When search is enabled, do not enable history count for claude 3.7 sonnet models
   const enableSearch = isAgentEnableSearch(s);

-  if (enableSearch &&
+  if (enableSearch && isThinkingWithToolClaudeModel(config.model)) return false;

   return chatConfig.enableHistoryCount;
 };
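The Set lookups this selector used to do inline came from `package/packages/const/src/models.ts`, which is deleted at the bottom of this diff; they are replaced by predicate functions exported from `@lobechat/model-runtime`. A plausible sketch of such a predicate, assuming it simply wraps the same model-ID Set (the real implementation in `packages/model-runtime/src/const/models.ts` is not expanded here and may apply extra matching rules):

```ts
// Sketch only: the exact list and matching rules live in @lobechat/model-runtime.
const contextCachingModels = new Set([
  'claude-opus-4-5-20251101',
  'claude-sonnet-4-5-20250929',
  // ...the remaining Claude and Bedrock IDs from the deleted const list below
]);

export const isContextCachingModel = (model: string): boolean =>
  contextCachingModels.has(model);
```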
--- a/package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts
+++ b/package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts
@@ -5,14 +5,17 @@ import { isDesktop } from '@lobechat/const';
 import {
   ChatImageItem,
   ChatToolPayload,
+  MessageContentPart,
   MessageToolCall,
   ModelUsage,
   TraceNameMap,
   UIChatMessage,
 } from '@lobechat/types';
+import { serializePartsForStorage } from '@lobechat/utils';
 import debug from 'debug';
 import { t } from 'i18next';
 import { throttle } from 'lodash-es';
+import pMap from 'p-map';
 import { StateCreator } from 'zustand/vanilla';

 import { createAgentToolsEngine } from '@/helpers/toolEngineering';
@@ -272,14 +275,21 @@ export const streamingExecutor: StateCreator<
   let finalUsage;
   let msgTraceId: string | undefined;
   let output = '';
-
+
+  let thinkingContent = '';
   let thinkingStartAt: number;
-  let
+  let thinkingDuration: number | undefined;
   let reasoningOperationId: string | undefined;
   let finishType: string | undefined;
   // to upload image
   const uploadTasks: Map<string, Promise<{ id?: string; url?: string }>> = new Map();

+  // Multimodal content parts
+  let contentParts: MessageContentPart[] = [];
+  let reasoningParts: MessageContentPart[] = [];
+  const contentImageUploads: Map<number, Promise<string>> = new Map();
+  const reasoningImageUploads: Map<number, Promise<string>> = new Map();
+
   // Throttle tool_calls updates to prevent excessive re-renders (max once per 300ms)
   const throttledUpdateToolCalls = throttle(
     (toolCalls: MessageToolCall[]) => {
@@ -344,7 +354,9 @@ export const streamingExecutor: StateCreator<
       if (uploadTasks.size > 0) {
         try {
           // Wait for all upload tasks to complete
-          const uploadResults = await
+          const uploadResults = await pMap(Array.from(uploadTasks.values()), (task) => task, {
+            concurrency: 5,
+          });

           // Replace the original image data with the uploaded S3 URLs
           finalImages = uploadResults.filter((i) => !!i.url) as ChatImageItem[];
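`pMap` replaces what was presumably a plain `Promise.all` over the pending upload tasks (the removed line is truncated in this diff), with a concurrency cap of 5. The pattern in isolation looks like this:

```ts
import pMap from 'p-map';

// Pending upload promises, already started elsewhere (as in uploadTasks above).
declare const uploadTasks: Map<string, Promise<{ id?: string; url?: string }>>;

const collectUploads = async () => {
  // Await at most 5 tasks at a time; result order matches input order.
  return pMap(Array.from(uploadTasks.values()), (task) => task, { concurrency: 5 });
};
```

Note that because the map values are promises that are already running, the `concurrency` option here bounds how many results are awaited at a time rather than how many uploads execute in parallel.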
@@ -353,6 +365,14 @@ export const streamingExecutor: StateCreator<
         }
       }

+      // Wait for all multimodal image uploads to complete
+      // Note: Arrays are already updated in-place when uploads complete
+      // Use Promise.allSettled to continue even if some uploads fail
+      await Promise.allSettled([
+        ...Array.from(contentImageUploads.values()),
+        ...Array.from(reasoningImageUploads.values()),
+      ]);
+
       let parsedToolCalls = toolCalls;
       if (parsedToolCalls && parsedToolCalls.length > 0) {
         // Flush any pending throttled updates before finalizing
@@ -384,18 +404,58 @@ export const streamingExecutor: StateCreator<
         operationId,
       );

+      // Check if there are any image parts
+      const hasContentImages = contentParts.some((part) => part.type === 'image');
+      const hasReasoningImages = reasoningParts.some((part) => part.type === 'image');
+
+      // Determine final content
+      // If has images, serialize contentParts; otherwise use accumulated output text
+      const finalContent = hasContentImages ? serializePartsForStorage(contentParts) : output;
+
+      const finalDuration =
+        thinkingDuration && !isNaN(thinkingDuration) ? thinkingDuration : undefined;
+
+      // Determine final reasoning content
+      // Priority: reasoningParts (multimodal) > thinkingContent (from reasoning_part text) > reasoning (from old reasoning event)
+      let finalReasoning: any = undefined;
+      if (hasReasoningImages) {
+        // Has images, use multimodal format
+        finalReasoning = {
+          content: serializePartsForStorage(reasoningParts),
+          duration: finalDuration,
+          isMultimodal: true,
+        };
+      } else if (thinkingContent) {
+        // Has text from reasoning_part but no images
+        finalReasoning = {
+          content: thinkingContent,
+          duration: finalDuration,
+        };
+      } else if (reasoning?.content) {
+        // Fallback to old reasoning event content
+        finalReasoning = {
+          ...reasoning,
+          duration: finalDuration,
+        };
+      }
+
       // update the content after fetch result
       await optimisticUpdateMessageContent(
         messageId,
-
+        finalContent,
         {
           tools,
-          reasoning:
-            ? { ...reasoning, duration: duration && !isNaN(duration) ? duration : undefined }
-            : undefined,
+          reasoning: finalReasoning,
           search: !!grounding?.citations ? grounding : undefined,
           imageList: finalImages.length > 0 ? finalImages : undefined,
-          metadata: {
+          metadata: {
+            ...usage,
+            ...speed,
+            performance: speed,
+            usage,
+            finishType: type,
+            ...(hasContentImages && { isMultimodal: true }),
+          },
         },
         { operationId },
       );
@@ -457,8 +517,8 @@ export const streamingExecutor: StateCreator<
         output += chunk.text;

         // if there is no duration, it means the end of reasoning
-        if (!
-
+        if (!thinkingDuration) {
+          thinkingDuration = Date.now() - thinkingStartAt;

           // Complete reasoning operation if it exists
           if (reasoningOperationId) {
@@ -480,7 +540,9 @@ export const streamingExecutor: StateCreator<
             type: 'updateMessage',
             value: {
               content: output,
-              reasoning: !!
+              reasoning: !!thinkingContent
+                ? { content: thinkingContent, duration: thinkingDuration }
+                : undefined,
             },
           },
           { operationId },
@@ -505,13 +567,178 @@ export const streamingExecutor: StateCreator<
           get().associateMessageWithOperation(messageId, reasoningOperationId);
         }

-
+        thinkingContent += chunk.text;

         internal_dispatchMessage(
           {
             id: messageId,
             type: 'updateMessage',
-            value: { reasoning: { content:
+            value: { reasoning: { content: thinkingContent } },
+          },
+          { operationId },
+        );
+        break;
+      }
+
+      case 'reasoning_part': {
+        // Start reasoning if not started
+        if (!thinkingStartAt) {
+          thinkingStartAt = Date.now();
+
+          const { operationId: reasoningOpId } = get().startOperation({
+            type: 'reasoning',
+            context: { sessionId, topicId, messageId },
+            parentOperationId: operationId,
+          });
+          reasoningOperationId = reasoningOpId;
+          get().associateMessageWithOperation(messageId, reasoningOperationId);
+        }
+
+        const { partType, content: partContent, mimeType } = chunk;
+
+        if (partType === 'text') {
+          const lastPart = reasoningParts.at(-1);
+
+          // If last part is also text, merge chunks together
+          if (lastPart?.type === 'text') {
+            reasoningParts = [
+              ...reasoningParts.slice(0, -1),
+              { type: 'text', text: lastPart.text + partContent },
+            ];
+          } else {
+            // Create new text part (first chunk, may contain thoughtSignature)
+            reasoningParts = [...reasoningParts, { type: 'text', text: partContent }];
+          }
+          thinkingContent += partContent;
+        } else if (partType === 'image') {
+          // Image part - create new array to avoid mutation
+          const tempImage = `data:${mimeType};base64,${partContent}`;
+          const partIndex = reasoningParts.length;
+          const newPart: MessageContentPart = { type: 'image', image: tempImage };
+          reasoningParts = [...reasoningParts, newPart];
+
+          // Start upload task and update array when done
+          const uploadTask = getFileStoreState()
+            .uploadBase64FileWithProgress(tempImage)
+            .then((file) => {
+              const url = file?.url || tempImage;
+              // Replace the part at index by creating a new array
+              const updatedParts = [...reasoningParts];
+              updatedParts[partIndex] = { type: 'image', image: url };
+              reasoningParts = updatedParts;
+              return url;
+            })
+            .catch((error) => {
+              console.error('[reasoning_part] Image upload failed:', error);
+              return tempImage;
+            });
+
+          reasoningImageUploads.set(partIndex, uploadTask);
+        }
+
+        // Real-time update with display format
+        // Check if there are any image parts to determine if it's multimodal
+        const hasReasoningImages = reasoningParts.some((part) => part.type === 'image');
+
+        internal_dispatchMessage(
+          {
+            id: messageId,
+            type: 'updateMessage',
+            value: {
+              reasoning: hasReasoningImages
+                ? { tempDisplayContent: reasoningParts, isMultimodal: true }
+                : { content: thinkingContent },
+            },
+          },
+          { operationId },
+        );
+        break;
+      }
+
+      case 'content_part': {
+        const { partType, content: partContent, mimeType } = chunk;
+
+        // End reasoning when content starts
+        if (!thinkingDuration && reasoningOperationId) {
+          thinkingDuration = Date.now() - thinkingStartAt;
+          get().completeOperation(reasoningOperationId);
+          reasoningOperationId = undefined;
+        }
+
+        if (partType === 'text') {
+          const lastPart = contentParts.at(-1);
+
+          // If last part is also text, merge chunks together
+          if (lastPart?.type === 'text') {
+            contentParts = [
+              ...contentParts.slice(0, -1),
+              { type: 'text', text: lastPart.text + partContent },
+            ];
+          } else {
+            // Create new text part (first chunk, may contain thoughtSignature)
+            contentParts = [...contentParts, { type: 'text', text: partContent }];
+          }
+          output += partContent;
+        } else if (partType === 'image') {
+          // Image part - create new array to avoid mutation
+          const tempImage = `data:${mimeType};base64,${partContent}`;
+          const partIndex = contentParts.length;
+          const newPart: MessageContentPart = {
+            type: 'image',
+            image: tempImage,
+          };
+          contentParts = [...contentParts, newPart];
+
+          // Start upload task and update array when done
+          const uploadTask = getFileStoreState()
+            .uploadBase64FileWithProgress(tempImage)
+            .then((file) => {
+              const url = file?.url || tempImage;
+              // Replace the part at index by creating a new array
+              const updatedParts = [...contentParts];
+              updatedParts[partIndex] = {
+                type: 'image',
+                image: url,
+              };
+              contentParts = updatedParts;
+              return url;
+            })
+            .catch((error) => {
+              console.error('[content_part] Image upload failed:', error);
+              return tempImage;
+            });
+
+          contentImageUploads.set(partIndex, uploadTask);
+        }
+
+        // Real-time update with display format
+        // Check if there are any image parts to determine if it's multimodal
+        const hasContentImages = contentParts.some((part) => part.type === 'image');
+
+        const hasReasoningImages = reasoningParts.some((part) => part.type === 'image');
+
+        internal_dispatchMessage(
+          {
+            id: messageId,
+            type: 'updateMessage',
+            value: {
+              content: output,
+              reasoning: hasReasoningImages
+                ? {
+                    tempDisplayContent: reasoningParts,
+                    isMultimodal: true,
+                    duration: thinkingDuration,
+                  }
+                : !!thinkingContent
+                  ? { content: thinkingContent, duration: thinkingDuration }
+                  : undefined,
+              ...(hasContentImages && {
+                metadata: {
+                  isMultimodal: true,
+                  tempDisplayContent: serializePartsForStorage(contentParts),
+                },
+              }),
+            },
           },
           { operationId },
         );
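Both new cases destructure the same fields from the chunk, which pins down the rough shape of the `content_part` / `reasoning_part` events added to the stream protocol (`packages/model-runtime/src/core/streams/protocol.ts`, +19 lines, not expanded in this diff). A sketch of that shape as implied by the handlers above; field names beyond the destructured ones are assumptions:

```ts
// Shape implied by `const { partType, content, mimeType } = chunk;` above.
// The canonical definition lives in the model-runtime stream protocol.
interface StreamPartChunk {
  type: 'content_part' | 'reasoning_part';
  /** 'text' carries a text delta; 'image' carries base64-encoded bytes */
  partType: 'text' | 'image';
  content: string;
  /** set for images, e.g. 'image/png' */
  mimeType?: string;
}
```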
@@ -525,8 +752,8 @@ export const streamingExecutor: StateCreator<
         isFunctionCall = true;

         // Complete reasoning operation if it exists
-        if (!
-
+        if (!thinkingDuration && reasoningOperationId) {
+          thinkingDuration = Date.now() - thinkingStartAt;
           get().completeOperation(reasoningOperationId);
           reasoningOperationId = undefined;
         }
@@ -535,8 +762,8 @@ export const streamingExecutor: StateCreator<

       case 'stop': {
         // Complete reasoning operation when receiving stop signal
-        if (!
-
+        if (!thinkingDuration && reasoningOperationId) {
+          thinkingDuration = Date.now() - thinkingStartAt;
           get().completeOperation(reasoningOperationId);
           reasoningOperationId = undefined;
         }
--- a/package/packages/const/src/models.ts
+++ /dev/null
@@ -1,93 +0,0 @@
-export const systemToUserModels = new Set([
-  'o1-preview',
-  'o1-preview-2024-09-12',
-  'o1-mini',
-  'o1-mini-2024-09-12',
-]);
-
-// TODO: temporary implementation, needs to be refactored into model card display configuration
-export const disableStreamModels = new Set([
-  'o1',
-  'o1-2024-12-17',
-  'o1-pro',
-  'o1-pro-2025-03-19',
-  /*
-    Official documentation shows no support, but actual testing shows Streaming is supported, temporarily commented out
-    'o3-pro',
-    'o3-pro-2025-06-10',
-  */
-  'computer-use-preview',
-  'computer-use-preview-2025-03-11',
-]);
-
-/**
- * models use Responses API only
- */
-export const responsesAPIModels = new Set([
-  'o1-pro',
-  'o1-pro-2025-03-19',
-  'o3-deep-research',
-  'o3-deep-research-2025-06-26',
-  'o3-pro',
-  'o3-pro-2025-06-10',
-  'o4-mini-deep-research',
-  'o4-mini-deep-research-2025-06-26',
-  'codex-mini-latest',
-  'computer-use-preview',
-  'computer-use-preview-2025-03-11',
-  'gpt-5-codex',
-  'gpt-5-pro',
-  'gpt-5-pro-2025-10-06',
-  'gpt-5.1-codex',
-  'gpt-5.1-codex-mini',
-]);
-
-/**
- * models support context caching
- */
-export const contextCachingModels = new Set([
-  'claude-opus-4-5-20251101',
-  'claude-haiku-4-5-20251001',
-  'claude-sonnet-4-5-latest',
-  'claude-sonnet-4-5-20250929',
-  'anthropic/claude-sonnet-4.5',
-  'claude-opus-4-latest',
-  'claude-opus-4-20250514',
-  'claude-sonnet-4-latest',
-  'claude-sonnet-4-20250514',
-  'claude-3-7-sonnet-latest',
-  'claude-3-7-sonnet-20250219',
-  'claude-3-5-sonnet-latest',
-  'claude-3-5-sonnet-20241022',
-  'claude-3-5-sonnet-20240620',
-  'claude-3-5-haiku-latest',
-  'claude-3-5-haiku-20241022',
-  // Bedrock model IDs
-  'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
-  'anthropic.claude-sonnet-4-5-20250929-v1:0',
-  'us.anthropic.claude-haiku-4-5-20251001-v1:0',
-  'anthropic.claude-haiku-4-5-20251001-v1:0',
-  'global.anthropic.claude-opus-4-5-20251101-v1:0',
-  'anthropic.claude-opus-4-5-20251101-v1:0',
-]);
-
-export const thinkingWithToolClaudeModels = new Set([
-  'claude-opus-4-5-20251101',
-  'claude-opus-4-latest',
-  'claude-opus-4-20250514',
-  'claude-sonnet-4-latest',
-  'claude-sonnet-4-20250514',
-  'claude-sonnet-4-5-latest',
-  'claude-sonnet-4-5-20250929',
-  'claude-haiku-4-5-20251001',
-  'anthropic/claude-sonnet-4.5',
-  'claude-3-7-sonnet-latest',
-  'claude-3-7-sonnet-20250219',
-  // Bedrock model IDs
-  'us.anthropic.claude-sonnet-4-5-20250929-v1:0',
-  'anthropic.claude-sonnet-4-5-20250929-v1:0',
-  'us.anthropic.claude-haiku-4-5-20251001-v1:0',
-  'anthropic.claude-haiku-4-5-20251001-v1:0',
-  'global.anthropic.claude-opus-4-5-20251101-v1:0',
-  'anthropic.claude-opus-4-5-20251101-v1:0',
-]);