@lobehub/lobehub 2.0.0-next.75 → 2.0.0-next.77
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/package.json +1 -1
- package/packages/model-runtime/src/core/contextBuilders/openai.ts +2 -0
- package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +1007 -0
- package/packages/model-runtime/src/core/streams/openai/openai.test.ts +310 -0
- package/packages/model-runtime/src/core/streams/openai/openai.ts +67 -0
- package/packages/model-runtime/src/providers/minimax/index.ts +36 -6
- package/packages/model-runtime/src/providers/novita/__snapshots__/index.test.ts.snap +584 -0
- package/packages/model-runtime/src/providers/openai/__snapshots__/index.test.ts.snap +517 -0
- package/packages/model-runtime/src/providers/ppio/__snapshots__/index.test.ts.snap +27 -0
- package/packages/utils/src/colorUtils.test.ts +167 -0
- package/src/app/[variants]/(main)/settings/provider/detail/index.tsx +4 -0
- package/src/locales/default/error.ts +2 -2
- package/src/app/[variants]/(main)/settings/provider/detail/newapi/{page.tsx → index.tsx} +0 -0
package/packages/model-runtime/src/core/streams/openai/openai.test.ts

@@ -2317,6 +2317,155 @@ describe('OpenAIStream', () => {
       );
     });
 
+    it('should handle reasoning_details array format from MiniMax M2', async () => {
+      const data = [
+        {
+          id: '055ccc4cbe1ca0dc18037256237d0823',
+          object: 'chat.completion.chunk',
+          created: 1762498892,
+          model: 'MiniMax-M2',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                content: '',
+                role: 'assistant',
+                name: 'MiniMax AI',
+                audio_content: '',
+                reasoning_details: [
+                  {
+                    type: 'reasoning.text',
+                    id: 'reasoning-text-1',
+                    format: 'MiniMax-response-v1',
+                    index: 0,
+                    text: '中文打招呼说"你好",',
+                  },
+                ],
+              },
+              finish_reason: null,
+            },
+          ],
+          usage: null,
+        },
+        {
+          id: '055ccc4cbe1ca0dc18037256237d0823',
+          object: 'chat.completion.chunk',
+          created: 1762498892,
+          model: 'MiniMax-M2',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                reasoning_details: [
+                  {
+                    type: 'reasoning.text',
+                    id: 'reasoning-text-2',
+                    format: 'MiniMax-response-v1',
+                    index: 0,
+                    text: '我需要用中文回复。',
+                  },
+                ],
+              },
+              finish_reason: null,
+            },
+          ],
+          usage: null,
+        },
+        {
+          id: '055ccc4cbe1ca0dc18037256237d0823',
+          object: 'chat.completion.chunk',
+          created: 1762498892,
+          model: 'MiniMax-M2',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                content: '你好',
+              },
+              finish_reason: null,
+            },
+          ],
+          usage: null,
+        },
+        {
+          id: '055ccc4cbe1ca0dc18037256237d0823',
+          object: 'chat.completion.chunk',
+          created: 1762498892,
+          model: 'MiniMax-M2',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                content: '!',
+              },
+              finish_reason: null,
+            },
+          ],
+          usage: null,
+        },
+        {
+          id: '055ccc4cbe1ca0dc18037256237d0823',
+          object: 'chat.completion.chunk',
+          created: 1762498892,
+          model: 'MiniMax-M2',
+          choices: [
+            {
+              index: 0,
+              delta: {
+                content: '',
+              },
+              finish_reason: 'stop',
+            },
+          ],
+          usage: {
+            prompt_tokens: 10,
+            completion_tokens: 20,
+            total_tokens: 30,
+          },
+        },
+      ];
+
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          data.forEach((chunk) => {
+            controller.enqueue(chunk);
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks).toEqual(
+        [
+          'id: 055ccc4cbe1ca0dc18037256237d0823',
+          'event: reasoning',
+          `data: "中文打招呼说\\"你好\\","\n`,
+          'id: 055ccc4cbe1ca0dc18037256237d0823',
+          'event: reasoning',
+          `data: "我需要用中文回复。"\n`,
+          'id: 055ccc4cbe1ca0dc18037256237d0823',
+          'event: text',
+          `data: "你好"\n`,
+          'id: 055ccc4cbe1ca0dc18037256237d0823',
+          'event: text',
+          `data: "!"\n`,
+          'id: 055ccc4cbe1ca0dc18037256237d0823',
+          'event: usage',
+          `data: {"inputTextTokens":10,"outputTextTokens":20,"totalInputTokens":10,"totalOutputTokens":20,"totalTokens":30}\n`,
+        ].map((i) => `${i}\n`),
+      );
+    });
+
     it('should handle claude reasoning in litellm openai mode', async () => {
       const data = [
         {
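For orientation, the protocol the assertions encode is plain SSE: every upstream chunk is re-emitted as an `id:` line, an `event:` line (`reasoning`, `text`, or `usage` here), and a JSON-stringified `data:` line. A minimal sketch of that framing, with a hypothetical `frame` helper that is not part of the package:

// Hypothetical helper, illustrating the SSE frame shape the test expects.
// Each field is written as its own `<name>: <value>\n` chunk.
const frame = (id: string, event: string, data: unknown): string[] => [
  `id: ${id}\n`,
  `event: ${event}\n`,
  `data: ${JSON.stringify(data)}\n\n`,
];

// A reasoning_details delta whose text is '我需要用中文回复。' becomes:
// ['id: 055cc…\n', 'event: reasoning\n', 'data: "我需要用中文回复。"\n\n']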
@@ -3346,5 +3495,166 @@ describe('OpenAIStream', () => {
         'chat response streaming chunk parse error, please contact your API Provider to fix it.',
       );
     });
+
+    it('should handle MiniMax base_resp error with insufficient quota (1008)', async () => {
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({
+            id: 'minimax-error-1008',
+            choices: null,
+            base_resp: {
+              status_code: 1008,
+              status_msg: 'insufficient balance',
+            },
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks[0]).toBe('id: minimax-error-1008\n');
+      expect(chunks[1]).toBe('event: error\n');
+      expect(chunks[2]).toContain('InsufficientQuota');
+      expect(chunks[2]).toContain('insufficient balance');
+      expect(chunks[2]).toContain('minimax');
+    });
+
+    it('should handle MiniMax base_resp error with invalid API key (2049)', async () => {
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({
+            id: 'minimax-error-2049',
+            choices: null,
+            base_resp: {
+              status_code: 2049,
+              status_msg: 'invalid API Key',
+            },
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks[0]).toBe('id: minimax-error-2049\n');
+      expect(chunks[1]).toBe('event: error\n');
+      expect(chunks[2]).toContain('InvalidProviderAPIKey');
+      expect(chunks[2]).toContain('invalid API Key');
+    });
+
+    it('should handle MiniMax base_resp error with rate limit (1002)', async () => {
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({
+            id: 'minimax-error-1002',
+            choices: null,
+            base_resp: {
+              status_code: 1002,
+              status_msg: 'request frequency exceeds limit',
+            },
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks[0]).toBe('id: minimax-error-1002\n');
+      expect(chunks[1]).toBe('event: error\n');
+      expect(chunks[2]).toContain('QuotaLimitReached');
+      expect(chunks[2]).toContain('request frequency exceeds limit');
+    });
+
+    it('should handle MiniMax base_resp error with context window exceeded (1039)', async () => {
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({
+            id: 'minimax-error-1039',
+            choices: null,
+            base_resp: {
+              status_code: 1039,
+              status_msg: 'token limit exceeded',
+            },
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks[0]).toBe('id: minimax-error-1039\n');
+      expect(chunks[1]).toBe('event: error\n');
+      expect(chunks[2]).toContain('ExceededContextWindow');
+      expect(chunks[2]).toContain('token limit exceeded');
+    });
+
+    it('should handle MiniMax base_resp error with fallback to ProviderBizError', async () => {
+      const mockOpenAIStream = new ReadableStream({
+        start(controller) {
+          controller.enqueue({
+            id: 'minimax-error-unknown',
+            choices: null,
+            base_resp: {
+              status_code: 9999,
+              status_msg: 'unknown error',
+            },
+          });
+
+          controller.close();
+        },
+      });
+
+      const protocolStream = OpenAIStream(mockOpenAIStream);
+
+      const decoder = new TextDecoder();
+      const chunks = [];
+
+      // @ts-ignore
+      for await (const chunk of protocolStream) {
+        chunks.push(decoder.decode(chunk, { stream: true }));
+      }
+
+      expect(chunks[0]).toBe('id: minimax-error-unknown\n');
+      expect(chunks[1]).toBe('event: error\n');
+      expect(chunks[2]).toContain('ProviderBizError');
+      expect(chunks[2]).toContain('unknown error');
+    });
   });
 });
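All five tests assert the same wire shape: the MiniMax business error surfaces as an `error` event whose payload is the serialized ChatMessageError built in the stream transformer (next hunk). For the 1008 case the frames look roughly like this (an illustrative reconstruction, not verbatim snapshot output):

id: minimax-error-1008
event: error
data: {"body":{"status_code":1008,"status_msg":"insufficient balance","provider":"minimax"},"message":"insufficient balance","type":"InsufficientQuota"}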
package/packages/model-runtime/src/core/streams/openai/openai.ts

@@ -71,6 +71,55 @@ const transformOpenAIStream = (
     return { data: errorData, id: 'first_chunk_error', type: 'error' };
   }
 
+  // MiniMax reports business errors (e.g. insufficient balance) via base_resp instead of FIRST_CHUNK_ERROR_KEY
+  // Typical response: { id: '...', choices: null, base_resp: { status_code: 1008, status_msg: 'insufficient balance' }, usage: {...} }
+  if ((chunk as any).base_resp && typeof (chunk as any).base_resp.status_code === 'number') {
+    const baseResp = (chunk as any).base_resp as {
+      message?: string;
+      status_code: number;
+      status_msg?: string;
+    };
+
+    if (baseResp.status_code !== 0) {
+      // Map MiniMax status codes to the corresponding runtime error types
+      let errorType: ILobeAgentRuntimeErrorType = AgentRuntimeErrorType.ProviderBizError;
+
+      switch (baseResp.status_code) {
+        // 1004 - unauthorized / token mismatch; 2049 - invalid API key
+        case 1004:
+        case 2049: {
+          errorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
+          break;
+        }
+        // 1008 - insufficient balance
+        case 1008: {
+          errorType = AgentRuntimeErrorType.InsufficientQuota;
+          break;
+        }
+        // 1002 - request frequency exceeds limit; 1041 - connection limit; 2045 - request rate growth exceeds limit
+        case 1002:
+        case 1041:
+        case 2045: {
+          errorType = AgentRuntimeErrorType.QuotaLimitReached;
+          break;
+        }
+        // 1039 - token limit exceeded
+        case 1039: {
+          errorType = AgentRuntimeErrorType.ExceededContextWindow;
+          break;
+        }
+      }
+
+      const errorData: ChatMessageError = {
+        body: { ...baseResp, provider: 'minimax' },
+        message: baseResp.status_msg || baseResp.message || 'MiniMax provider error',
+        type: errorType,
+      };
+
+      return { data: errorData, id: chunk.id, type: 'error' };
+    }
+  }
+
   try {
     // maybe need another structure to add support for multiple choices
     if (!Array.isArray(chunk.choices) || chunk.choices.length === 0) {
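The switch is equivalent to a small status-code lookup with a ProviderBizError fallback. A condensed sketch, using local string literals as stand-ins for the AgentRuntimeErrorType constants:

// Illustrative restatement of the mapping above; strings stand in for
// the runtime's AgentRuntimeErrorType constants.
const MINIMAX_STATUS_TO_ERROR: Record<number, string> = {
  1002: 'QuotaLimitReached', // request frequency exceeds limit
  1004: 'InvalidProviderAPIKey', // unauthorized / token mismatch
  1008: 'InsufficientQuota', // insufficient balance
  1039: 'ExceededContextWindow', // token limit exceeded
  1041: 'QuotaLimitReached', // connection limit
  2045: 'QuotaLimitReached', // request rate growth exceeds limit
  2049: 'InvalidProviderAPIKey', // invalid API key
};

const mapMinimaxStatus = (code: number): string =>
  MINIMAX_STATUS_TO_ERROR[code] ?? 'ProviderBizError';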
@@ -265,6 +314,24 @@ const transformOpenAIStream = (
     let reasoning_content = (() => {
       if ('reasoning_content' in item.delta) return item.delta.reasoning_content;
       if ('reasoning' in item.delta) return item.delta.reasoning;
+      // Handle MiniMax M2 reasoning_details format (array of objects with text field)
+      if ('reasoning_details' in item.delta) {
+        const details = item.delta.reasoning_details;
+        if (Array.isArray(details)) {
+          return details
+            .filter((detail: any) => detail.text)
+            .map((detail: any) => detail.text)
+            .join('');
+        }
+        if (typeof details === 'string') {
+          return details;
+        }
+        if (typeof details === 'object' && details !== null && 'text' in details) {
+          return details.text;
+        }
+        // Fallback for unexpected types
+        return '';
+      }
       // Handle content array format with thinking blocks (e.g. mistral AI Magistral model)
       if ('content' in item.delta && Array.isArray(item.delta.content)) {
        return item.delta.content
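The new branch normalizes three possible shapes of `reasoning_details` into a single string. Pulled out as a standalone helper (hypothetical name, same logic as the branch above):

// Hypothetical standalone version of the reasoning_details branch.
const extractReasoningText = (details: unknown): string => {
  // Array of parts: concatenate every non-empty `text` field
  if (Array.isArray(details)) {
    return details
      .filter((d: any) => d.text)
      .map((d: any) => d.text)
      .join('');
  }
  // Bare string: pass through
  if (typeof details === 'string') return details;
  // Single object carrying a text field
  if (typeof details === 'object' && details !== null && 'text' in details) {
    return (details as { text: string }).text;
  }
  // Fallback for unexpected types
  return '';
};

// extractReasoningText([{ text: 'a' }, { text: 'b' }]) === 'ab'
// extractReasoningText('plain') === 'plain'
// extractReasoningText({ text: 'single' }) === 'single'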
package/packages/model-runtime/src/providers/minimax/index.ts

@@ -13,17 +13,45 @@ export const LobeMinimaxAI = createOpenAICompatibleRuntime({
   baseURL: 'https://api.minimaxi.com/v1',
   chatCompletion: {
     handlePayload: (payload) => {
-      const { enabledSearch, max_tokens, temperature, tools, top_p, ...params } = payload;
+      const { enabledSearch, max_tokens, messages, temperature, tools, top_p, ...params } = payload;
 
       const minimaxTools = enabledSearch
         ? [
-
-
-
-
-
+            ...(tools || []),
+            {
+              type: 'web_search',
+            },
+          ]
         : tools;
 
+      // Interleaved thinking
+      const processedMessages = messages.map((message: any) => {
+        if (message.role === 'assistant' && message.reasoning) {
+          // Only convert historical reasoning content that carries no signature
+          if (!message.reasoning.signature && message.reasoning.content) {
+            const { reasoning, ...messageWithoutReasoning } = message;
+            return {
+              ...messageWithoutReasoning,
+              reasoning_details: [
+                {
+                  format: 'MiniMax-response-v1',
+                  id: 'reasoning-text-0',
+                  index: 0,
+                  text: reasoning.content,
+                  type: 'reasoning.text',
+                },
+              ],
+            };
+          }
+
+          // If the reasoning has a signature or no content, drop the reasoning field
+          // eslint-disable-next-line unused-imports/no-unused-vars, @typescript-eslint/no-unused-vars
+          const { reasoning, ...messageWithoutReasoning } = message;
+          return messageWithoutReasoning;
+        }
+        return message;
+      });
+
       // Resolve parameters with constraints
       const resolvedParams = resolveParameters(
         {
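The message rewrite is easiest to see on a concrete assistant turn (illustrative data). An unsigned `reasoning` field is re-encoded into MiniMax's `reasoning_details` shape; a signed one is simply dropped:

// Illustrative input: a historical assistant message with unsigned reasoning.
const before = {
  content: '你好!',
  reasoning: { content: 'The user greeted me in Chinese, so I reply in Chinese.' },
  role: 'assistant',
};

// Illustrative result after handlePayload's message processing:
const after = {
  content: '你好!',
  reasoning_details: [
    {
      format: 'MiniMax-response-v1',
      id: 'reasoning-text-0',
      index: 0,
      text: 'The user greeted me in Chinese, so I reply in Chinese.',
      type: 'reasoning.text',
    },
  ],
  role: 'assistant',
};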
@@ -46,6 +74,8 @@ export const LobeMinimaxAI = createOpenAICompatibleRuntime({
       return {
         ...params,
         max_tokens: resolvedParams.max_tokens,
+        messages: processedMessages,
+        reasoning_split: true,
         temperature: finalTemperature,
         tools: minimaxTools,
         top_p: resolvedParams.top_p,