@lobehub/lobehub 2.0.0-next.81 → 2.0.0-next.83

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/CHANGELOG.md +58 -0
  2. package/changelog/v1.json +21 -0
  3. package/docs/usage/providers/comfyui.mdx +1 -1
  4. package/docs/usage/providers/comfyui.zh-CN.mdx +1 -1
  5. package/locales/ar/error.json +2 -2
  6. package/locales/ar/modelProvider.json +1 -1
  7. package/locales/ar/models.json +7 -1
  8. package/locales/bg-BG/error.json +2 -2
  9. package/locales/bg-BG/modelProvider.json +1 -1
  10. package/locales/bg-BG/models.json +7 -1
  11. package/locales/de-DE/error.json +2 -2
  12. package/locales/de-DE/modelProvider.json +1 -1
  13. package/locales/de-DE/models.json +7 -1
  14. package/locales/en-US/error.json +2 -2
  15. package/locales/en-US/modelProvider.json +1 -1
  16. package/locales/en-US/models.json +7 -1
  17. package/locales/es-ES/error.json +2 -2
  18. package/locales/es-ES/modelProvider.json +1 -1
  19. package/locales/es-ES/models.json +7 -1
  20. package/locales/fa-IR/error.json +2 -2
  21. package/locales/fa-IR/modelProvider.json +1 -1
  22. package/locales/fa-IR/models.json +7 -1
  23. package/locales/fr-FR/error.json +2 -2
  24. package/locales/fr-FR/modelProvider.json +1 -1
  25. package/locales/fr-FR/models.json +7 -1
  26. package/locales/it-IT/error.json +2 -2
  27. package/locales/it-IT/modelProvider.json +1 -1
  28. package/locales/it-IT/models.json +7 -1
  29. package/locales/ja-JP/error.json +2 -2
  30. package/locales/ja-JP/modelProvider.json +1 -1
  31. package/locales/ja-JP/models.json +7 -1
  32. package/locales/ko-KR/error.json +2 -2
  33. package/locales/ko-KR/modelProvider.json +1 -1
  34. package/locales/ko-KR/models.json +7 -1
  35. package/locales/nl-NL/error.json +2 -2
  36. package/locales/nl-NL/modelProvider.json +1 -1
  37. package/locales/nl-NL/models.json +7 -1
  38. package/locales/pl-PL/error.json +2 -2
  39. package/locales/pl-PL/modelProvider.json +1 -1
  40. package/locales/pl-PL/models.json +7 -1
  41. package/locales/pt-BR/error.json +2 -2
  42. package/locales/pt-BR/modelProvider.json +1 -1
  43. package/locales/pt-BR/models.json +7 -1
  44. package/locales/ru-RU/error.json +2 -2
  45. package/locales/ru-RU/modelProvider.json +1 -1
  46. package/locales/ru-RU/models.json +7 -1
  47. package/locales/tr-TR/error.json +2 -2
  48. package/locales/tr-TR/modelProvider.json +1 -1
  49. package/locales/tr-TR/models.json +7 -1
  50. package/locales/vi-VN/error.json +2 -2
  51. package/locales/vi-VN/modelProvider.json +1 -1
  52. package/locales/vi-VN/models.json +7 -1
  53. package/locales/zh-CN/error.json +2 -2
  54. package/locales/zh-CN/modelProvider.json +1 -1
  55. package/locales/zh-CN/models.json +7 -1
  56. package/locales/zh-TW/error.json +2 -2
  57. package/locales/zh-TW/modelProvider.json +1 -1
  58. package/locales/zh-TW/models.json +7 -1
  59. package/package.json +1 -1
  60. package/packages/model-bank/src/aiModels/novita.ts +3 -2
  61. package/packages/model-bank/src/aiModels/nvidia.ts +14 -0
  62. package/packages/model-bank/src/aiModels/ollamacloud.ts +23 -2
  63. package/packages/model-bank/src/aiModels/qwen.ts +88 -0
  64. package/packages/model-bank/src/aiModels/siliconcloud.ts +20 -0
  65. package/packages/model-bank/src/aiModels/vercelaigateway.ts +0 -17
  66. package/packages/model-bank/src/aiModels/volcengine.ts +1 -1
  67. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +108 -64
  68. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +150 -125
  69. package/packages/model-runtime/src/providers/newapi/index.test.ts +3 -75
  70. package/packages/model-runtime/src/providers/newapi/index.ts +1 -14
  71. package/packages/model-runtime/src/providers/openrouter/index.test.ts +3 -2
  72. package/packages/model-runtime/src/providers/openrouter/index.ts +1 -1
  73. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +19 -6
  74. package/src/app/[variants]/(main)/settings/provider/features/customProviderSdkOptions.ts +1 -0
  75. package/src/config/modelProviders/aihubmix.ts +1 -0
  76. package/src/config/modelProviders/newapi.ts +1 -0
  77. package/src/libs/trpc/client/lambda.ts +3 -1
  78. package/src/locales/default/modelProvider.ts +1 -1
package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts

@@ -138,10 +138,10 @@ export interface OpenAICompatibleFactoryOptions<T extends Record<string, any> =
     useToolsCalling?: boolean;
   };
   models?:
-    | ((params: { client: OpenAI }) => Promise<ChatModelCard[]>)
-    | {
-        transformModel?: (model: OpenAI.Model) => ChatModelCard;
-      };
+    | ((params: { client: OpenAI }) => Promise<ChatModelCard[]>)
+    | {
+        transformModel?: (model: OpenAI.Model) => ChatModelCard;
+      };
   provider: string;
   responses?: {
     handlePayload?: (
@@ -205,6 +205,81 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
     this.logPrefix = `lobe-model-runtime:${this.id}`;
   }

+  /**
+   * Determine if should use Responses API based on various configuration options
+   * @param params - Configuration parameters
+   * @returns true if should use Responses API, false otherwise
+   */
+  private shouldUseResponsesAPI(params: {
+    /** Context for logging (e.g., 'chat', 'generateObject', 'tool calling') */
+    context?: string;
+    /** Factory/instance level useResponse flag */
+    flagUseResponse?: boolean;
+    /** Factory/instance level model patterns for Responses API */
+    flagUseResponseModels?: Array<string | RegExp>;
+    /** The model ID to check */
+    model?: string;
+    /** Explicit responseApi flag */
+    responseApi?: boolean;
+    /** User-specified API mode (highest priority) */
+    userApiMode?: string;
+  }): boolean {
+    const {
+      model,
+      userApiMode,
+      responseApi,
+      flagUseResponse,
+      flagUseResponseModels,
+      context = 'operation',
+    } = params;
+
+    const log = debug(`${this.logPrefix}:shouldUseResponsesAPI`);
+
+    // Priority 1: User explicitly set apiMode via switch
+    if (userApiMode === 'responses') {
+      log('using Responses API: explicit userApiMode=%s', userApiMode);
+      return true;
+    }
+
+    // Priority 2: userApiMode is explicitly set to something else
+    if (userApiMode !== undefined) {
+      log('using Chat Completions API: userApiMode=%s', userApiMode);
+      return false;
+    }
+
+    // Priority 3: Explicit responseApi flag
+    if (responseApi) {
+      log('using Responses API: explicit responseApi flag for %s', context);
+      return true;
+    }
+
+    // Priority 4: Factory/instance level useResponse flag
+    if (flagUseResponse) {
+      log('using Responses API: flagUseResponse=true for %s', context);
+      return true;
+    }
+
+    // Priority 5: Check if model matches useResponseModels patterns
+    if (model && flagUseResponseModels?.length) {
+      const matches = flagUseResponseModels.some((m: string | RegExp) =>
+        typeof m === 'string' ? model.includes(m) : (m as RegExp).test(model),
+      );
+      if (matches) {
+        log('using Responses API: model %s matches useResponseModels config', model);
+        return true;
+      }
+    }
+
+    // Priority 6: Check built-in responsesAPIModels
+    if (model && responsesAPIModels.has(model)) {
+      log('using Responses API: model %s in built-in responsesAPIModels', model);
+      return true;
+    }
+
+    log('using Chat Completions API for %s', context);
+    return false;
+  }
+
   async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatMethodOptions) {
     try {
       const log = debug(`${this.logPrefix}:chat`);
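The added shouldUseResponsesAPI helper consolidates routing logic that was previously duplicated across chat, generateObject, and tool calling. The sketch below restates the priority chain as a standalone function to make it easier to see which API a given call resolves to; resolveUseResponsesAPI, builtinResponsesModels, and the example argument values are illustrative stand-ins, not exports of the package.

```ts
// Illustrative re-implementation of the six-step priority chain above (not the package API).
type RoutingParams = {
  flagUseResponse?: boolean;
  flagUseResponseModels?: Array<string | RegExp>;
  model?: string;
  responseApi?: boolean;
  userApiMode?: string;
};

// Stand-in for the built-in `responsesAPIModels` set imported from ../../const/models.
const builtinResponsesModels = new Set(['o1-pro']);

const resolveUseResponsesAPI = (p: RoutingParams): boolean => {
  // Priorities 1-2: an explicit user apiMode always wins, in either direction.
  if (p.userApiMode === 'responses') return true;
  if (p.userApiMode !== undefined) return false;
  // Priorities 3-4: explicit responseApi flag, then factory/instance useResponse flag.
  if (p.responseApi || p.flagUseResponse) return true;
  // Priority 5: factory-configured model patterns (string = substring, RegExp = test).
  if (
    p.model &&
    p.flagUseResponseModels?.some((m) =>
      typeof m === 'string' ? p.model!.includes(m) : m.test(p.model!),
    )
  )
    return true;
  // Priority 6: fall back to the built-in Responses API model list.
  return Boolean(p.model && builtinResponsesModels.has(p.model));
};

// Example resolutions (mode strings other than 'responses' are illustrative):
resolveUseResponsesAPI({ model: 'gpt-4o', userApiMode: 'chatCompletion' }); // false — user choice wins
resolveUseResponsesAPI({ model: 'gpt-4o', flagUseResponseModels: ['gpt-'] }); // true — pattern match
resolveUseResponsesAPI({ model: 'o1-pro' }); // true — built-in list
```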
@@ -212,41 +287,39 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an

      log('chat called with model: %s, stream: %s', payload.model, payload.stream ?? true);

-      // 工厂级 Responses API 路由控制(支持实例覆盖)
+      let processedPayload: any = payload;
+      const userApiMode = (payload as any).apiMode as string | undefined;
      const modelId = (payload as any).model as string | undefined;
-      const shouldUseResponses = (() => {
-        const instanceChat = ((this._options as any).chatCompletion || {}) as {
-          useResponse?: boolean;
-          useResponseModels?: Array<string | RegExp>;
-        };
-        const flagUseResponse =
-          instanceChat.useResponse ?? (chatCompletion ? chatCompletion.useResponse : undefined);
-        const flagUseResponseModels =
-          instanceChat.useResponseModels ?? chatCompletion?.useResponseModels;
-
-        if (!chatCompletion && !instanceChat) return false;
-        if (flagUseResponse) return true;
-        if (!modelId || !flagUseResponseModels?.length) return false;
-        return flagUseResponseModels.some((m: string | RegExp) =>
-          typeof m === 'string' ? modelId.includes(m) : (m as RegExp).test(modelId),
-        );
-      })();

-      let processedPayload: any = payload;
+      const instanceChat = ((this._options as any).chatCompletion || {}) as {
+        useResponse?: boolean;
+        useResponseModels?: Array<string | RegExp>;
+      };
+      const flagUseResponse =
+        instanceChat.useResponse ?? (chatCompletion ? chatCompletion.useResponse : undefined);
+      const flagUseResponseModels =
+        instanceChat.useResponseModels ?? chatCompletion?.useResponseModels;
+
+      // Determine if should use Responses API
+      const shouldUseResponses = this.shouldUseResponsesAPI({
+        context: 'chat',
+        flagUseResponse,
+        flagUseResponseModels,
+        model: modelId,
+        userApiMode,
+      });
+
      if (shouldUseResponses) {
-        log('using Responses API mode');
        processedPayload = { ...payload, apiMode: 'responses' } as any;
-      } else {
-        log('using Chat Completions API mode');
      }

      // 再进行工厂级处理
      const postPayload = chatCompletion?.handlePayload
        ? chatCompletion.handlePayload(processedPayload, this._options)
        : ({
-            ...processedPayload,
-            stream: processedPayload.stream ?? true,
-          } as OpenAI.ChatCompletionCreateParamsStreaming);
+            ...processedPayload,
+            stream: processedPayload.stream ?? true,
+          } as OpenAI.ChatCompletionCreateParamsStreaming);

      if ((postPayload as any).apiMode === 'responses') {
        return this.handleResponseAPIMode(processedPayload, options);
@@ -312,13 +385,13 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
      return StreamingResponse(
        chatCompletion?.handleStream
          ? chatCompletion.handleStream(prod, {
-              callbacks: streamOptions.callbacks,
-              inputStartAt,
-            })
+              callbacks: streamOptions.callbacks,
+              inputStartAt,
+            })
          : OpenAIStream(prod, {
-              ...streamOptions,
-              inputStartAt,
-            }),
+              ...streamOptions,
+              inputStartAt,
+            }),
        {
          headers: options?.headers,
        },
@@ -342,9 +415,9 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
      return StreamingResponse(
        chatCompletion?.handleStream
          ? chatCompletion.handleStream(stream, {
-              callbacks: streamOptions.callbacks,
-              inputStartAt,
-            })
+              callbacks: streamOptions.callbacks,
+              inputStartAt,
+            })
          : OpenAIStream(stream, { ...streamOptions, enableStreaming: false, inputStartAt }),
        {
          headers: options?.headers,
@@ -500,47 +573,23 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
    }

    // Factory-level Responses API routing control (supports instance override)
-    const shouldUseResponses = (() => {
-      const instanceGenerateObject = ((this._options as any).generateObject || {}) as {
-        useResponse?: boolean;
-        useResponseModels?: Array<string | RegExp>;
-      };
-      const flagUseResponse =
-        instanceGenerateObject.useResponse ??
-        (generateObjectConfig ? generateObjectConfig.useResponse : undefined);
-      const flagUseResponseModels =
-        instanceGenerateObject.useResponseModels ?? generateObjectConfig?.useResponseModels;
-
-      if (responseApi) {
-        log('using Responses API due to explicit responseApi flag');
-        return true;
-      }
-
-      if (flagUseResponse) {
-        log('using Responses API due to useResponse flag');
-        return true;
-      }
-
-      // Use factory-configured model list if provided
-      if (model && flagUseResponseModels?.length) {
-        const matches = flagUseResponseModels.some((m: string | RegExp) =>
-          typeof m === 'string' ? model.includes(m) : (m as RegExp).test(model),
-        );
-        if (matches) {
-          log('using Responses API: model %s matches useResponseModels config', model);
-          return true;
-        }
-      }
-
-      // Default: use built-in responsesAPIModels
-      if (model && responsesAPIModels.has(model)) {
-        log('using Responses API: model %s in built-in responsesAPIModels', model);
-        return true;
-      }
-
-      log('using Chat Completions API for generateObject');
-      return false;
-    })();
+    const instanceGenerateObject = ((this._options as any).generateObject || {}) as {
+      useResponse?: boolean;
+      useResponseModels?: Array<string | RegExp>;
+    };
+    const flagUseResponse =
+      instanceGenerateObject.useResponse ??
+      (generateObjectConfig ? generateObjectConfig.useResponse : undefined);
+    const flagUseResponseModels =
+      instanceGenerateObject.useResponseModels ?? generateObjectConfig?.useResponseModels;
+
+    const shouldUseResponses = this.shouldUseResponsesAPI({
+      context: 'generateObject',
+      flagUseResponse,
+      flagUseResponseModels,
+      model,
+      responseApi,
+    });

    // Apply schema transformation if configured
    const processedSchema = generateObjectConfig?.handleSchema
@@ -790,11 +839,11 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
      ...res,
      ...(reasoning || reasoning_effort
        ? {
-            reasoning: {
-              ...reasoning,
-              ...(reasoning_effort && { effort: reasoning_effort }),
-            },
-          }
+            reasoning: {
+              ...reasoning,
+              ...(reasoning_effort && { effort: reasoning_effort }),
+            },
+          }
        : {}),
      input,
      ...(max_tokens && { max_output_tokens: max_tokens }),
@@ -885,47 +934,23 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = an
    );

    // Factory-level Responses API routing control (supports instance override)
-    const shouldUseResponses = (() => {
-      const instanceGenerateObject = ((this._options as any).generateObject || {}) as {
-        useResponse?: boolean;
-        useResponseModels?: Array<string | RegExp>;
-      };
-      const flagUseResponse =
-        instanceGenerateObject.useResponse ??
-        (generateObjectConfig ? generateObjectConfig.useResponse : undefined);
-      const flagUseResponseModels =
-        instanceGenerateObject.useResponseModels ?? generateObjectConfig?.useResponseModels;
-
-      if (responseApi) {
-        log('using Responses API due to explicit responseApi flag');
-        return true;
-      }
-
-      if (flagUseResponse) {
-        log('using Responses API due to useResponse flag');
-        return true;
-      }
-
-      // Use factory-configured model list if provided
-      if (model && flagUseResponseModels?.length) {
-        const matches = flagUseResponseModels.some((m: string | RegExp) =>
-          typeof m === 'string' ? model.includes(m) : (m as RegExp).test(model),
-        );
-        if (matches) {
-          log('using Responses API: model %s matches useResponseModels config', model);
-          return true;
-        }
-      }
-
-      // Default: use built-in responsesAPIModels
-      if (model && responsesAPIModels.has(model)) {
-        log('using Responses API: model %s in built-in responsesAPIModels', model);
-        return true;
-      }
-
-      log('using Chat Completions API for tool calling');
-      return false;
-    })();
+    const instanceGenerateObject = ((this._options as any).generateObject || {}) as {
+      useResponse?: boolean;
+      useResponseModels?: Array<string | RegExp>;
+    };
+    const flagUseResponse =
+      instanceGenerateObject.useResponse ??
+      (generateObjectConfig ? generateObjectConfig.useResponse : undefined);
+    const flagUseResponseModels =
+      instanceGenerateObject.useResponseModels ?? generateObjectConfig?.useResponseModels;
+
+    const shouldUseResponses = this.shouldUseResponsesAPI({
+      context: 'tool calling',
+      flagUseResponse,
+      flagUseResponseModels,
+      model,
+      responseApi,
+    });

    if (shouldUseResponses) {
      log('calling responses.create for tool calling');
package/packages/model-runtime/src/providers/newapi/index.test.ts

@@ -5,7 +5,7 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 import { responsesAPIModels } from '../../const/models';
 import { ChatStreamPayload } from '../../types/chat';
 import * as modelParseModule from '../../utils/modelParse';
-import { LobeNewAPIAI, NewAPIModelCard, NewAPIPricing, handlePayload, params } from './index';
+import { LobeNewAPIAI, NewAPIModelCard, NewAPIPricing, params } from './index';

 // Mock external dependencies
 vi.mock('../../utils/modelParse');
@@ -701,78 +701,6 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
    });
  });

-  describe('HandlePayload Function - Direct Testing', () => {
-    beforeEach(() => {
-      // Mock responsesAPIModels as a Set for testing
-      (responsesAPIModels as any).has = vi.fn((model: string) => model === 'o1-pro');
-    });
-
-    it('should add apiMode for models in responsesAPIModels set', () => {
-      (responsesAPIModels as any).has = vi.fn((model: string) => model === 'o1-pro');
-
-      const payload: ChatStreamPayload = {
-        model: 'o1-pro',
-        messages: [{ role: 'user', content: 'test' }],
-        temperature: 0.5,
-      };
-
-      const result = handlePayload(payload);
-      expect(result).toEqual({ ...payload, apiMode: 'responses' });
-    });
-
-    it('should add apiMode for gpt- models', () => {
-      (responsesAPIModels as any).has = vi.fn(() => false);
-
-      const payload: ChatStreamPayload = {
-        model: 'gpt-4o',
-        messages: [{ role: 'user', content: 'test' }],
-        temperature: 0.5,
-      };
-
-      const result = handlePayload(payload);
-      expect(result).toEqual({ ...payload, apiMode: 'responses' });
-    });
-
-    it('should add apiMode for o1 models', () => {
-      (responsesAPIModels as any).has = vi.fn(() => false);
-
-      const payload: ChatStreamPayload = {
-        model: 'o1-mini',
-        messages: [{ role: 'user', content: 'test' }],
-        temperature: 0.5,
-      };
-
-      const result = handlePayload(payload);
-      expect(result).toEqual({ ...payload, apiMode: 'responses' });
-    });
-
-    it('should add apiMode for o3 models', () => {
-      (responsesAPIModels as any).has = vi.fn(() => false);
-
-      const payload: ChatStreamPayload = {
-        model: 'o3-turbo',
-        messages: [{ role: 'user', content: 'test' }],
-        temperature: 0.5,
-      };
-
-      const result = handlePayload(payload);
-      expect(result).toEqual({ ...payload, apiMode: 'responses' });
-    });
-
-    it('should not modify payload for regular models', () => {
-      (responsesAPIModels as any).has = vi.fn(() => false);
-
-      const payload: ChatStreamPayload = {
-        model: 'claude-3-sonnet',
-        messages: [{ role: 'user', content: 'test' }],
-        temperature: 0.5,
-      };
-
-      const result = handlePayload(payload);
-      expect(result).toEqual(payload);
-    });
-  });
-
  describe('Routers Function - Direct Testing', () => {
    it('should generate routers with correct apiTypes', () => {
      const options = { apiKey: 'test', baseURL: 'https://api.newapi.com/v1' };
@@ -823,11 +751,11 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
    expect(routers[3].options.baseURL).toBe('https://custom.com/v1');
  });

-  it('should configure openai router with handlePayload', () => {
+  it('should configure openai router with useResponseModels', () => {
    const options = { apiKey: 'test', baseURL: 'https://custom.com/v1' };
    const routers = params.routers(options);

-    expect((routers[3].options as any).chatCompletion?.handlePayload).toBe(handlePayload);
+    expect((routers[3].options as any).chatCompletion?.useResponseModels).toBeDefined();
  });

  it('should filter anthropic models for anthropic router', () => {
package/packages/model-runtime/src/providers/newapi/index.ts

@@ -4,7 +4,6 @@ import urlJoin from 'url-join';
 import { responsesAPIModels } from '../../const/models';
 import { createRouterRuntime } from '../../core/RouterRuntime';
 import { CreateRouterRuntimeOptions } from '../../core/RouterRuntime/createRuntime';
-import { ChatStreamPayload } from '../../types/chat';
 import { detectModelProvider, processMultiProviderModelList } from '../../utils/modelParse';

 export interface NewAPIModelCard {
@@ -26,18 +25,6 @@
  supported_endpoint_types?: string[];
}

-export const handlePayload = (payload: ChatStreamPayload) => {
-  // Handle OpenAI responses API mode
-  if (
-    responsesAPIModels.has(payload.model) ||
-    payload.model.includes('gpt-') ||
-    /^o\d/.test(payload.model)
-  ) {
-    return { ...payload, apiMode: 'responses' };
-  }
-  return payload;
-};
-
export const params = {
  debug: {
    chatCompletion: () => process.env.DEBUG_NEWAPI_CHAT_COMPLETION === '1',
@@ -178,7 +165,7 @@ export const params = {
        ...options,
        baseURL: urlJoin(userBaseURL, '/v1'),
        chatCompletion: {
-          handlePayload,
+          useResponseModels: [...Array.from(responsesAPIModels), /gpt-\d(?!\d)/, /^o\d/],
        },
      },
    },
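With this change, NewAPI's openai router drops its bespoke handlePayload and relies on the generic useResponseModels matching instead (string entries are checked with includes, RegExp entries with test, as in the factory helper above). A rough sketch of how the two new patterns behave, with a placeholder string entry standing in for the spread of responsesAPIModels:

```ts
// Sketch of the pattern matching now configured for NewAPI's openai router
// (the string entry is a placeholder for whatever responsesAPIModels actually contains).
const patterns: Array<string | RegExp> = ['o1-pro', /gpt-\d(?!\d)/, /^o\d/];

const routesToResponsesAPI = (model: string) =>
  patterns.some((m) => (typeof m === 'string' ? model.includes(m) : m.test(model)));

routesToResponsesAPI('gpt-4o');          // true  — /gpt-\d(?!\d)/ matches "gpt-4"
routesToResponsesAPI('gpt-35-turbo');    // false — the lookahead rejects a second digit
routesToResponsesAPI('o3-mini');         // true  — /^o\d/ matches an "o<digit>" prefix
routesToResponsesAPI('claude-3-sonnet'); // false — stays on Chat Completions
```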
package/packages/model-runtime/src/providers/openrouter/index.test.ts

@@ -1548,8 +1548,9 @@ describe('LobeOpenRouterAI - custom features', () => {
    const models = await params.models();

    const mixedModel = models.find((m) => m.id === 'mixed-free/model');
-    // Input or output is 0, so should be marked as free
-    expect(mixedModel?.displayName).toContain('(free)');
+    // Input or output is 0. Current behavior does not append '(free)' for mixed pricing,
+    // so assert the displayName equals the cleaned model name.
+    expect(mixedModel?.displayName).toBe('Mixed Free Model');
  });

  it('should handle very large pricing values', async () => {
package/packages/model-runtime/src/providers/openrouter/index.ts

@@ -99,7 +99,7 @@ export const params = {
    const cachedInputPrice = formatPrice(pricing.input_cache_read);
    const writeCacheInputPrice = formatPrice(pricing.input_cache_write);

-    const isFree = (inputPrice === 0 || outputPrice === 0) && !displayName.endsWith('(free)');
+    const isFree = inputPrice === 0 && outputPrice === 0 && !displayName.endsWith('(free)');
    if (isFree) {
      displayName += ' (free)';
    }
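This stricter predicate is what the updated OpenRouter test above reflects: a model is suffixed with "(free)" only when both the input and output prices are zero. A before/after sketch with made-up prices:

```ts
// Old vs. new "(free)" suffix decision for OpenRouter models (illustrative prices).
const wasFree = (input: number, output: number) => input === 0 || output === 0; // previous behavior
const isFree = (input: number, output: number) => input === 0 && output === 0;  // new behavior

wasFree(0, 2.5); // true  — mixed pricing used to get the "(free)" suffix
isFree(0, 2.5);  // false — mixed pricing no longer does (matches the updated test)
isFree(0, 0);    // true  — genuinely free models are still suffixed
```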
package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx

@@ -42,6 +42,15 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open }) => {
      name: values.name || values.id,
    };

+    // 只为 openai 和 router (newapi) 类型的自定义 provider 添加 supportResponsesApi: true
+    const sdkType = values.settings?.sdkType;
+    if (sdkType === 'openai' || sdkType === 'router') {
+      finalValues.settings = {
+        ...finalValues.settings,
+        supportResponsesApi: true,
+      };
+    }
+
    await createNewAiProvider(finalValues);
    setLoading(false);
    navigate(`/settings?active=provider&provider=${values.id}`);
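The effect is that custom providers created with the openai or router (New API) SDK types are saved with Responses API support enabled, while other SDK types are left untouched. A small sketch of that settings shape; the helper name is hypothetical and only mirrors the form logic above:

```ts
// Hypothetical helper mirroring the form logic above: only 'openai' and 'router'
// (New API) custom providers get supportResponsesApi enabled at creation time.
const withResponsesSupport = (
  sdkType: string,
  settings: Record<string, unknown> = {},
): Record<string, unknown> =>
  sdkType === 'openai' || sdkType === 'router'
    ? { ...settings, supportResponsesApi: true }
    : settings;

withResponsesSupport('router', { sdkType: 'router' });
// => { sdkType: 'router', supportResponsesApi: true }
withResponsesSupport('anthropic', { sdkType: 'anthropic' });
// => { sdkType: 'anthropic' } — other SDK types are left untouched
```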
@@ -102,12 +111,16 @@
          {
            children: (
              <Select
-                optionRender={({ label, value }) => (
-                  <Flexbox align={'center'} gap={8} horizontal>
-                    <ProviderIcon provider={value as string} size={18} />
-                    {label}
-                  </Flexbox>
-                )}
+                optionRender={({ label, value }) => {
+                  // Map 'router' to 'newapi' for displaying the correct icon
+                  const iconProvider = value === 'router' ? 'newapi' : (value as string);
+                  return (
+                    <Flexbox align={'center'} gap={8} horizontal>
+                      <ProviderIcon provider={iconProvider} size={18} />
+                      {label}
+                    </Flexbox>
+                  );
+                }}
                options={CUSTOM_PROVIDER_SDK_OPTIONS}
                placeholder={t('createNewAiProvider.sdkType.placeholder')}
                variant={'filled'}
package/src/app/[variants]/(main)/settings/provider/features/customProviderSdkOptions.ts

@@ -9,4 +9,5 @@ export const CUSTOM_PROVIDER_SDK_OPTIONS = [
  { label: 'Qwen', value: 'qwen' },
  { label: 'Volcengine', value: 'volcengine' },
  { label: 'Ollama', value: 'ollama' },
+  { label: 'New API', value: 'router' },
] satisfies { label: string; value: AiProviderSDKType }[];
package/src/config/modelProviders/aihubmix.ts

@@ -11,6 +11,7 @@ const AiHubMix: ModelProviderCard = {
  settings: {
    sdkType: 'router',
    showModelFetcher: true,
+    supportResponsesApi: true,
  },
  url: 'https://aihubmix.com?utm_source=lobehub',
};
package/src/config/modelProviders/newapi.ts

@@ -13,6 +13,7 @@ const NewAPI: ModelProviderCard = {
    },
    sdkType: 'router',
    showModelFetcher: true,
+    supportResponsesApi: true,
  },
  url: 'https://github.com/Calcium-Ion/new-api',
};
package/src/libs/trpc/client/lambda.ts

@@ -52,8 +52,10 @@ const errorHandlingLink: TRPCLink<LambdaRouter> = () => {
      }

      default: {
-        if (fetchErrorNotification)
+        console.error(err);
+        if (fetchErrorNotification && status) {
          fetchErrorNotification.error({ errorMessage: err.message, status });
+        }
      }
    }
  }
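The tightened default branch now always logs the error and only raises the notification when an HTTP status is present. A minimal sketch of the same guard, with simplified types in place of the real tRPC and notification ones:

```ts
// Sketch of the tightened default branch (simplified types; not the actual tRPC link code).
type Notifier = { error: (p: { errorMessage: string; status: number }) => void };

const handleUnknownError = (
  err: { message: string },
  status?: number,
  fetchErrorNotification?: Notifier,
) => {
  console.error(err); // always log, even when no toast can be shown
  if (fetchErrorNotification && status) {
    fetchErrorNotification.error({ errorMessage: err.message, status });
  }
};
```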
package/src/locales/default/modelProvider.ts

@@ -298,7 +298,7 @@ export default {
    },
    helpDoc: '配置教程',
    responsesApi: {
-      desc: '采用 OpenAI 新一代请求格式规范,解锁思维链等进阶特性',
+      desc: '采用 OpenAI 新一代请求格式规范,解锁思维链等进阶特性 (仅 OpenAI 模型支持)',
      title: '使用 Responses API 规范',
    },
    waitingForMore: '更多模型正在 <1>计划接入</1> 中,敬请期待',