@lobehub/lobehub 2.0.0-next.305 → 2.0.0-next.306
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/e2e/src/steps/community/detail-pages.steps.ts +3 -1
- package/e2e/src/steps/community/interactions.steps.ts +4 -4
- package/package.json +1 -1
- package/packages/context-engine/src/processors/GroupMessageFlatten.ts +9 -6
- package/packages/context-engine/src/processors/__tests__/GroupMessageFlatten.test.ts +103 -0
- package/packages/context-engine/src/providers/GroupAgentBuilderContextInjector.ts +18 -31
- package/packages/context-engine/src/providers/__tests__/GroupAgentBuilderContextInjector.test.ts +307 -0
- package/packages/prompts/src/prompts/userMemory/__snapshots__/index.test.ts.snap +14 -38
- package/packages/prompts/src/prompts/userMemory/index.ts +5 -24
- package/src/app/[variants]/(main)/community/(detail)/assistant/index.tsx +1 -1
- package/src/app/[variants]/(main)/community/(detail)/mcp/index.tsx +1 -1
- package/src/app/[variants]/(main)/group/features/Conversation/MainChatInput/index.tsx +2 -2
- package/src/app/[variants]/(main)/home/_layout/Body/Agent/List/AgentItem/index.tsx +2 -2
- package/src/features/Conversation/Messages/Supervisor/index.tsx +2 -1
- package/src/features/Conversation/Messages/components/ContentLoading.tsx +8 -2
- package/src/services/chat/mecha/agentConfigResolver.ts +65 -0
- package/src/services/chat/mecha/modelParamsResolver.test.ts +211 -0
- package/src/store/agentGroup/action.ts +30 -0
- package/src/store/agentGroup/slices/lifecycle.test.ts +77 -18
- package/src/store/agentGroup/slices/lifecycle.ts +7 -9
- package/src/store/chat/slices/operation/__tests__/selectors.test.ts +124 -0
- package/src/store/chat/slices/operation/selectors.ts +22 -0
package/packages/prompts/src/prompts/userMemory/__snapshots__/index.test.ts.snap

@@ -120,15 +120,9 @@ exports[`promptUserMemory > identities only > should format identities grouped b
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="3">
-  <personal
-
-  </
-  <professional count="1">
-    <identity role="Software Engineer">User is a senior software engineer</identity>
-  </professional>
-  <demographic count="1">
-    <identity>User is based in Shanghai</identity>
-  </demographic>
+  <identity type="personal" role="Father" id="id-1">User is a father of two children</identity>
+  <identity type="professional" role="Software Engineer" id="id-2">User is a senior software engineer</identity>
+  <identity type="demographic" id="id-3">User is based in Shanghai</identity>
 </identities>
 </user_memory>"
 `;
@@ -137,10 +131,8 @@ exports[`promptUserMemory > identities only > should format single type identiti
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="2">
-  <demographic
-
-    <identity>User speaks Mandarin and English</identity>
-  </demographic>
+  <identity type="demographic" id="id-1">User is 35 years old</identity>
+  <identity type="demographic" id="id-2">User speaks Mandarin and English</identity>
 </identities>
 </user_memory>"
 `;
@@ -149,10 +141,8 @@ exports[`promptUserMemory > identities only > should format single type identiti
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="2">
-  <personal
-
-    <identity>User has a pet dog</identity>
-  </personal>
+  <identity type="personal" id="id-1">User is married</identity>
+  <identity type="personal" id="id-2">User has a pet dog</identity>
 </identities>
 </user_memory>"
 `;
@@ -161,9 +151,7 @@ exports[`promptUserMemory > identities only > should format single type identiti
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="1">
-  <professional
-    <identity role="CTO">User works at a tech startup</identity>
-  </professional>
+  <identity type="professional" role="CTO" id="id-1">User works at a tech startup</identity>
 </identities>
 </user_memory>"
 `;
@@ -172,9 +160,7 @@ exports[`promptUserMemory > identities only > should handle identity with null v
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="1">
-  <personal
-    <identity></identity>
-  </personal>
+  <identity type="personal" id="id-1"></identity>
 </identities>
 </user_memory>"
 `;
@@ -183,9 +169,7 @@ exports[`promptUserMemory > identities only > should handle identity without rol
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="1">
-  <personal
-    <identity>User enjoys hiking</identity>
-  </personal>
+  <identity type="personal" id="id-1">User enjoys hiking</identity>
 </identities>
 </user_memory>"
 `;
@@ -194,9 +178,7 @@ exports[`promptUserMemory > mixed memory types > should format all memory types
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="1">
-  <professional
-    <identity role="Tech Lead">User is a tech lead at a startup</identity>
-  </professional>
+  <identity type="professional" role="Tech Lead" id="id-1">User is a tech lead at a startup</identity>
 </identities>
 <contexts count="1">
 <context id="ctx-1" title="Experience Level">Senior developer with 10 years experience</context>
@@ -217,15 +199,9 @@ exports[`promptUserMemory > mixed memory types > should format all memory types
 "<user_memory>
 <instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>
 <identities count="3">
-  <personal
-
-  </
-  <professional count="1">
-    <identity role="Senior Engineer">User is a senior engineer</identity>
-  </professional>
-  <demographic count="1">
-    <identity>User lives in Beijing</identity>
-  </demographic>
+  <identity type="personal" role="Father" id="id-1">User is a father</identity>
+  <identity type="professional" role="Senior Engineer" id="id-2">User is a senior engineer</identity>
+  <identity type="demographic" id="id-3">User lives in Beijing</identity>
 </identities>
 <contexts count="1">
 <context id="ctx-1" title="Current Work">Working on AI products</context>
package/packages/prompts/src/prompts/userMemory/index.ts

@@ -98,29 +98,10 @@ const isValidIdentityItem = (item: UserMemoryIdentityItem): boolean => {
  * Formats a single identity memory item
  */
 const formatIdentityItem = (item: UserMemoryIdentityItem): string => {
+  const typeAttr = item.type ? ` type="${item.type}"` : '';
   const roleAttr = item.role ? ` role="${item.role}"` : '';
-
-}
-
-/**
- * Format identities grouped by type as XML
- * Types: personal (角色), professional (职业), demographic (属性)
- */
-const formatIdentitiesSection = (identities: UserMemoryIdentityItem[]): string => {
-  const personal = identities.filter((i) => i.type === 'personal');
-  const professional = identities.filter((i) => i.type === 'professional');
-  const demographic = identities.filter((i) => i.type === 'demographic');
-
-  return [
-    personal.length > 0 &&
-      ` <personal count="${personal.length}">\n${personal.map(formatIdentityItem).join('\n')}\n </personal>`,
-    professional.length > 0 &&
-      ` <professional count="${professional.length}">\n${professional.map(formatIdentityItem).join('\n')}\n </professional>`,
-    demographic.length > 0 &&
-      ` <demographic count="${demographic.length}">\n${demographic.map(formatIdentityItem).join('\n')}\n </demographic>`,
-  ]
-    .filter(Boolean)
-    .join('\n');
+  const idAttr = item.id ? ` id="${item.id}"` : '';
+  return ` <identity${typeAttr}${roleAttr}${idAttr}>${item.description || ''}</identity>`;
 };
 
 /**
@@ -156,9 +137,9 @@ export const promptUserMemory = ({ memories }: PromptUserMemoryOptions): string
     '<instruction>The following are memories about this user retrieved from previous conversations. Use this information to personalize your responses and maintain continuity.</instruction>',
   );
 
-  // Add identities section (user's identity information
+  // Add identities section (user's identity information)
   if (hasIdentities) {
-    const identitiesXml =
+    const identitiesXml = identities.map((item) => formatIdentityItem(item)).join('\n');
     contentParts.push(`<identities count="${identities.length}">
 ${identitiesXml}
 </identities>`);
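Note: the two hunks above flatten the identities block. The per-type `<personal>`/`<professional>`/`<demographic>` wrappers are gone, and each memory is now emitted as a single `<identity>` element carrying `type`, `role`, and `id` attributes. A minimal standalone sketch of the new output shape (the `UserMemoryIdentityItem` interface below is an assumption reconstructed from the fields used in the diff):

```ts
// Sketch of the flattened identity formatting shown in the hunks above.
interface UserMemoryIdentityItem {
  description?: string;
  id?: string;
  role?: string;
  type?: 'personal' | 'professional' | 'demographic';
}

const formatIdentityItem = (item: UserMemoryIdentityItem): string => {
  const typeAttr = item.type ? ` type="${item.type}"` : '';
  const roleAttr = item.role ? ` role="${item.role}"` : '';
  const idAttr = item.id ? ` id="${item.id}"` : '';
  return `  <identity${typeAttr}${roleAttr}${idAttr}>${item.description || ''}</identity>`;
};

const identities: UserMemoryIdentityItem[] = [
  { description: 'User is a father of two children', id: 'id-1', role: 'Father', type: 'personal' },
  { description: 'User is based in Shanghai', id: 'id-3', type: 'demographic' },
];

// Produces the flat structure seen in the updated snapshots:
// <identities count="2">
//   <identity type="personal" role="Father" id="id-1">User is a father of two children</identity>
//   <identity type="demographic" id="id-3">User is based in Shanghai</identity>
// </identities>
console.log(`<identities count="${identities.length}">\n${identities.map(formatIdentityItem).join('\n')}\n</identities>`);
```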
package/src/app/[variants]/(main)/community/(detail)/assistant/index.tsx

@@ -40,7 +40,7 @@ const AssistantDetailPage = memo<AssistantDetailPageProps>(({ mobile }) => {
   return (
     <TocProvider>
       <DetailProvider config={data}>
-        <Flexbox gap={16}>
+        <Flexbox data-testid="assistant-detail-content" gap={16}>
           <Header mobile={mobile} />
           <Details mobile={mobile} />
         </Flexbox>
package/src/app/[variants]/(main)/community/(detail)/mcp/index.tsx

@@ -35,7 +35,7 @@ const McpDetailPage = memo<McpDetailPageProps>(({ mobile }) => {
   return (
     <TocProvider>
       <DetailProvider config={data}>
-        <Flexbox gap={16}>
+        <Flexbox data-testid="mcp-detail-content" gap={16}>
           <Header mobile={mobile} />
           <Details mobile={mobile} />
         </Flexbox>
package/src/app/[variants]/(main)/group/features/Conversation/MainChatInput/index.tsx

@@ -11,10 +11,10 @@ import { useSendMenuItems } from './useSendMenuItems';
 const leftActions: ActionKeys[] = [
   'model',
   'search',
-  'typo',
   'fileUpload',
+  'tools',
   '---',
-  ['
+  ['typo', 'params', 'clear'],
   'mainToken',
 ];
 
package/src/app/[variants]/(main)/home/_layout/Body/Agent/List/AgentItem/index.tsx

@@ -37,8 +37,8 @@ const AgentItem = memo<AgentItemProps>(({ item, style, className }) => {
     s.agentUpdatingId === id,
   ]);
 
-  // Separate loading state from chat store - only
-  const isLoading = useChatStore(operationSelectors.
+  // Separate loading state from chat store - only show loading for this specific agent
+  const isLoading = useChatStore(operationSelectors.isAgentRunning(id));
 
   // Get display title with fallback
   const displayTitle = title || t('untitledAgent');
package/src/features/Conversation/Messages/Supervisor/index.tsx

@@ -55,7 +55,8 @@ const GroupMessage = memo<GroupMessageProps>(({ id, index, disableEditing, isLat
 
   // Get editing state from ConversationStore
   const creating = useConversationStore(messageStateSelectors.isMessageCreating(id));
-  const
+  const generating = useConversationStore(messageStateSelectors.isMessageGenerating(id));
+  const newScreen = useNewScreen({ creating: creating || generating, isLatestItem });
 
   const setMessageItemActionElementPortialContext = useSetMessageItemActionElementPortialContext();
   const setMessageItemActionTypeContext = useSetMessageItemActionTypeContext();
package/src/features/Conversation/Messages/components/ContentLoading.tsx

@@ -18,10 +18,11 @@ interface ContentLoadingProps {
 const ContentLoading = memo<ContentLoadingProps>(({ id }) => {
   const { t } = useTranslation('chat');
   const runningOp = useChatStore(operationSelectors.getDeepestRunningOperationByMessage(id));
+  console.log('runningOp', runningOp);
   const [elapsedSeconds, setElapsedSeconds] = useState(0);
+  const [startTime, setStartTime] = useState(runningOp?.metadata?.startTime);
 
   const operationType = runningOp?.type as OperationType | undefined;
-  const startTime = runningOp?.metadata?.startTime;
 
   // Track elapsed time, reset when operation type changes
   useEffect(() => {

@@ -39,7 +40,12 @@ const ContentLoading = memo<ContentLoadingProps>(({ id }) => {
     const interval = setInterval(updateElapsed, 1000);
 
     return () => clearInterval(interval);
-  }, [startTime
+  }, [startTime]);
+
+  useEffect(() => {
+    setElapsedSeconds(0);
+    setStartTime(Date.now());
+  }, [operationType, id]);
 
   // Get localized label based on operation type
   const operationLabel = operationType
package/src/services/chat/mecha/agentConfigResolver.ts

@@ -5,6 +5,7 @@ import {
   type LobeAgentConfig,
   type MessageMapScope,
 } from '@lobechat/types';
+import debug from 'debug';
 import { produce } from 'immer';
 
 import { getAgentStoreState } from '@/store/agent';

@@ -12,6 +13,8 @@ import { agentSelectors, chatConfigByIdSelectors } from '@/store/agent/selectors
 import { getChatGroupStoreState } from '@/store/agentGroup';
 import { agentGroupByIdSelectors, agentGroupSelectors } from '@/store/agentGroup/selectors';
 
+const log = debug('mecha:agentConfigResolver');
+
 /**
  * Applies params adjustments based on chatConfig settings.
  *

@@ -99,6 +102,8 @@ export interface ResolvedAgentConfig {
 export const resolveAgentConfig = (ctx: AgentConfigResolverContext): ResolvedAgentConfig => {
   const { agentId, model, documentContent, plugins, targetAgentConfig } = ctx;
 
+  log('resolveAgentConfig called with agentId: %s, scope: %s', agentId, ctx.scope);
+
   const agentStoreState = getAgentStoreState();
 
   // Get base config from store

@@ -111,19 +116,46 @@ export const resolveAgentConfig = (ctx: AgentConfigResolverContext): ResolvedAge
   // Check if this is a builtin agent
   // First check agent store, then check if this is a supervisor agent in agentGroup store
   let slug = agentSelectors.getAgentSlugById(agentId)(agentStoreState);
+  log('slug from agentStore: %s (agentId: %s)', slug, agentId);
 
   // If not found in agent store, check if this is a supervisor agent in any group
   // Supervisor agents have their slug stored in agentGroup store, not agent store
   if (!slug) {
     const groupStoreState = getChatGroupStoreState();
+    const groupMap = groupStoreState.groupMap;
+    const groupMapKeys = Object.keys(groupMap);
+    log(
+      'checking groupStore for supervisor - groupMap has %d groups: %o',
+      groupMapKeys.length,
+      groupMapKeys.map((key) => ({
+        groupId: key,
+        supervisorAgentId: groupMap[key]?.supervisorAgentId,
+        title: groupMap[key]?.title,
+      })),
+    );
+
     const group = agentGroupByIdSelectors.groupBySupervisorAgentId(agentId)(groupStoreState);
+    log(
+      'groupBySupervisorAgentId result for agentId %s: %o',
+      agentId,
+      group
+        ? {
+            groupId: group.id,
+            supervisorAgentId: group.supervisorAgentId,
+            title: group.title,
+          }
+        : null,
+    );
+
     if (group) {
       // This is a supervisor agent - use the builtin slug
       slug = BUILTIN_AGENT_SLUGS.groupSupervisor;
+      log('agentId %s identified as group supervisor, assigned slug: %s', agentId, slug);
     }
   }
 
   if (!slug) {
+    log('agentId %s is not a builtin agent (no slug found)', agentId);
     // Regular agent - use provided plugins if available, fallback to agent's plugins
     const finalPlugins = plugins && plugins.length > 0 ? plugins : basePlugins;
 

@@ -183,18 +215,45 @@ export const resolveAgentConfig = (ctx: AgentConfigResolverContext): ResolvedAge
   // Build groupSupervisorContext if this is a group-supervisor agent
   let groupSupervisorContext;
   if (slug === BUILTIN_AGENT_SLUGS.groupSupervisor) {
+    log('building groupSupervisorContext for agentId: %s', agentId);
     const groupStoreState = getChatGroupStoreState();
     // Find the group by supervisor agent ID
     const group = agentGroupSelectors.getGroupBySupervisorAgentId(agentId)(groupStoreState);
 
+    log(
+      'getGroupBySupervisorAgentId result: %o',
+      group
+        ? {
+            agentsCount: group.agents?.length,
+            groupId: group.id,
+            supervisorAgentId: group.supervisorAgentId,
+            title: group.title,
+          }
+        : null,
+    );
+
     if (group) {
       const groupMembers = agentGroupSelectors.getGroupMembers(group.id)(groupStoreState);
+      log(
+        'groupMembers for groupId %s: %o',
+        group.id,
+        groupMembers.map((m) => ({ id: m.id, isSupervisor: m.isSupervisor, title: m.title })),
+      );
+
       groupSupervisorContext = {
         availableAgents: groupMembers.map((agent) => ({ id: agent.id, title: agent.title })),
         groupId: group.id,
         groupTitle: group.title || 'Group Chat',
         systemPrompt: agentConfig.systemRole,
       };
+      log('groupSupervisorContext built: %o', {
+        availableAgentsCount: groupSupervisorContext.availableAgents.length,
+        groupId: groupSupervisorContext.groupId,
+        groupTitle: groupSupervisorContext.groupTitle,
+        hasSystemPrompt: !!groupSupervisorContext.systemPrompt,
+      });
+    } else {
+      log('WARNING: group not found for supervisor agentId: %s', agentId);
     }
   }
 

@@ -258,6 +317,12 @@ export const resolveAgentConfig = (ctx: AgentConfigResolverContext): ResolvedAge
   // Apply params adjustments based on chatConfig
   const finalAgentConfig = applyParamsFromChatConfig(resolvedAgentConfig, resolvedChatConfig);
 
+  log('resolveAgentConfig completed for agentId: %s, result: %o', agentId, {
+    isBuiltinAgent: true,
+    pluginsCount: finalPlugins.length,
+    slug,
+  });
+
   return {
     agentConfig: finalAgentConfig,
     chatConfig: resolvedChatConfig,
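Note: the new `log()` calls above go through the `debug` package under the `mecha:agentConfigResolver` namespace, so they stay silent unless that namespace is enabled; this is standard `debug` behaviour rather than anything specific to this release. A small sketch:

```ts
import debug from 'debug';

// Same namespace as the resolver above; printf-style tokens (%s, %o, %d)
// are formatted by the debug package itself.
const log = debug('mecha:agentConfigResolver');

// No output by default. Enable with, for example:
//   Node / Vitest:  DEBUG=mecha:* pnpm test
//   Browser:        localStorage.debug = 'mecha:agentConfigResolver'
log('resolveAgentConfig called with agentId: %s', 'agent-123');
```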
package/src/services/chat/mecha/modelParamsResolver.test.ts

@@ -800,4 +800,215 @@ describe('resolveModelExtendParams', () => {
       expect(modelExtendParamsSpy).toHaveBeenCalledWith('test-model', 'test-provider');
     });
   });
+
+  describe('parameter precedence and conflicts', () => {
+    beforeEach(() => {
+      vi.spyOn(aiModelSelectors.aiModelSelectors, 'isModelHasExtendParams').mockReturnValue(
+        () => true,
+      );
+    });
+
+    describe('reasoning effort variants precedence', () => {
+      it('should give precedence to later reasoning effort variants when multiple are configured', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'reasoningEffort',
+          'gpt5ReasoningEffort',
+          'gpt5_1ReasoningEffort',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            gpt5_1ReasoningEffort: 'high',
+            gpt5ReasoningEffort: 'medium',
+            reasoningEffort: 'low',
+          } as any,
+          model: 'gpt-5.1',
+          provider: 'openai',
+        });
+
+        // gpt5_1ReasoningEffort should win as it's processed last
+        expect(result.reasoning_effort).toBe('high');
+      });
+
+      it('should handle mixed reasoning effort variants with only some configured', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'reasoningEffort',
+          'gpt5ReasoningEffort',
+          'gpt5_2ReasoningEffort',
+          'gpt5_2ProReasoningEffort',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            gpt5_2ProReasoningEffort: undefined,
+            gpt5_2ReasoningEffort: 'medium',
+            gpt5ReasoningEffort: undefined,
+            reasoningEffort: 'low',
+          } as any,
+          model: 'gpt-5.2',
+          provider: 'openai',
+        });
+
+        // gpt5_2ReasoningEffort should be set, others are undefined
+        expect(result.reasoning_effort).toBe('medium');
+      });
+
+      it('should use the last supported variant in processing order', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'reasoningEffort',
+          'gpt5_2ProReasoningEffort',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            gpt5_2ProReasoningEffort: 'high',
+            reasoningEffort: 'low',
+          } as any,
+          model: 'gpt-5.2-pro',
+          provider: 'openai',
+        });
+
+        // gpt5_2ProReasoningEffort is processed after reasoningEffort
+        expect(result.reasoning_effort).toBe('high');
+      });
+    });
+
+    describe('thinking configuration conflicts', () => {
+      it('should allow thinking type param to overwrite enableReasoning thinking config', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'enableReasoning',
+          'thinking',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            enableReasoning: true,
+            reasoningBudgetToken: 2048,
+            thinking: 'extended',
+          } as any,
+          model: 'model',
+          provider: 'provider',
+        });
+
+        // thinking param overwrites enableReasoning's thinking config
+        expect(result.thinking).toEqual({
+          type: 'extended',
+        });
+      });
+
+      it('should handle reasoningBudgetToken with thinking type param', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'reasoningBudgetToken',
+          'thinking',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            reasoningBudgetToken: 4096,
+            thinking: 'basic',
+          } as any,
+          model: 'model',
+          provider: 'provider',
+        });
+
+        // thinking param should overwrite the entire thinking config
+        expect(result.thinking).toEqual({
+          type: 'basic',
+        });
+      });
+
+      it('should combine independent thinking params without conflict', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'thinking',
+          'thinkingBudget',
+          'thinkingLevel',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            thinking: 'enabled',
+            thinkingBudget: 5000,
+            thinkingLevel: 'advanced',
+          } as any,
+          model: 'model',
+          provider: 'provider',
+        });
+
+        // These are independent params and should all be set
+        expect(result.thinking).toEqual({ type: 'enabled' });
+        expect(result.thinkingBudget).toBe(5000);
+        expect(result.thinkingLevel).toBe('advanced');
+      });
+    });
+
+    describe('complex multi-parameter scenarios', () => {
+      it('should handle all reasoning variants with context caching and verbosity', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'enableReasoning',
+          'reasoningEffort',
+          'gpt5ReasoningEffort',
+          'disableContextCaching',
+          'textVerbosity',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {
+            disableContextCaching: true,
+            enableReasoning: true,
+            gpt5ReasoningEffort: 'high',
+            reasoningBudgetToken: 3000,
+            reasoningEffort: 'medium',
+            textVerbosity: 'verbose',
+          } as any,
+          model: 'gpt-5',
+          provider: 'openai',
+        });
+
+        expect(result).toEqual({
+          enabledContextCaching: false,
+          reasoning_effort: 'high',
+          thinking: {
+            budget_tokens: 3000,
+            type: 'enabled',
+          },
+          verbosity: 'verbose',
+        });
+      });
+
+      it('should handle all params when none are configured', () => {
+        vi.spyOn(aiModelSelectors.aiModelSelectors, 'modelExtendParams').mockReturnValue(() => [
+          'enableReasoning',
+          'reasoningEffort',
+          'textVerbosity',
+          'thinking',
+          'thinkingBudget',
+          'thinkingLevel',
+          'urlContext',
+          'imageAspectRatio',
+          'imageResolution',
+          'disableContextCaching',
+        ]);
+
+        const result = resolveModelExtendParams({
+          chatConfig: {} as any,
+          model: 'model',
+          provider: 'provider',
+        });
+
+        // Only enableReasoning should set thinking to disabled, others should be undefined
+        expect(result.thinking).toEqual({
+          budget_tokens: 0,
+          type: 'disabled',
+        });
+        expect(result.reasoning_effort).toBeUndefined();
+        expect(result.verbosity).toBeUndefined();
+        expect(result.thinkingBudget).toBeUndefined();
+        expect(result.thinkingLevel).toBeUndefined();
+        expect(result.urlContext).toBeUndefined();
+        expect(result.imageAspectRatio).toBeUndefined();
+        expect(result.imageResolution).toBeUndefined();
+        expect(result.enabledContextCaching).toBeUndefined();
+      });
+    });
+  });
 });
package/src/store/agentGroup/action.ts

@@ -48,6 +48,12 @@ export interface ChatGroupInternalAction {
       type: string;
     },
   ) => void;
+  /**
+   * Fetch group detail directly and update store.
+   * Unlike refreshGroupDetail which uses SWR mutate, this method fetches immediately
+   * and is useful when SWR hook is not yet mounted (e.g., after createGroup).
+   */
+  internal_fetchGroupDetail: (groupId: string) => Promise<void>;
   internal_updateGroupMaps: (groups: ChatGroupItem[]) => void;
   loadGroups: () => Promise<void>;
   refreshGroupDetail: (groupId: string) => Promise<void>;

@@ -91,6 +97,30 @@ const chatGroupInternalSlice: StateCreator<
   return {
     internal_dispatchChatGroup: dispatch,
 
+    internal_fetchGroupDetail: async (groupId: string) => {
+      const groupDetail = await chatGroupService.getGroupDetail(groupId);
+      if (!groupDetail) return;
+
+      // Update groupMap with full group detail including supervisorAgentId and agents
+      dispatch({ payload: { id: groupDetail.id, value: groupDetail }, type: 'updateGroup' });
+
+      // Sync group agents to agentStore for builtin agent resolution
+      const agentStore = getAgentStoreState();
+      for (const agent of groupDetail.agents) {
+        agentStore.internal_dispatchAgentMap(agent.id, agent as any);
+      }
+
+      // Set activeAgentId to supervisor for correct model resolution
+      if (groupDetail.supervisorAgentId) {
+        agentStore.setActiveAgentId(groupDetail.supervisorAgentId);
+        useChatStore.setState(
+          { activeAgentId: groupDetail.supervisorAgentId },
+          false,
+          'syncActiveAgentIdFromAgentGroup',
+        );
+      }
+    },
+
     internal_updateGroupMaps: (groups) => {
       // Build a candidate map from incoming groups
       const incomingMap = groups.reduce(