@lobehub/lobehub 2.0.0-next.262 → 2.0.0-next.263

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/locales/zh-CN/chat.json +1 -0
  4. package/locales/zh-CN/modelProvider.json +20 -0
  5. package/package.json +1 -1
  6. package/packages/database/src/models/aiModel.ts +2 -0
  7. package/packages/database/src/repositories/aiInfra/index.test.ts +41 -1
  8. package/packages/database/src/repositories/aiInfra/index.ts +3 -1
  9. package/packages/model-runtime/src/providers/openrouter/index.test.ts +9 -55
  10. package/packages/model-runtime/src/providers/openrouter/index.ts +47 -27
  11. package/packages/model-runtime/src/providers/openrouter/type.ts +16 -28
  12. package/packages/model-runtime/src/providers/vercelaigateway/index.test.ts +6 -6
  13. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +54 -11
  14. package/packages/model-runtime/src/utils/modelParse.test.ts +185 -3
  15. package/packages/model-runtime/src/utils/modelParse.ts +108 -1
  16. package/packages/types/src/llm.ts +3 -1
  17. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/ExtendParamsSelect.tsx +398 -0
  18. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/Form.tsx +11 -2
  19. package/src/app/[variants]/(main)/settings/provider/features/ModelList/CreateNewModelModal/__tests__/ExtendParamsSelect.test.tsx +59 -0
  20. package/src/features/ChatInput/ActionBar/Model/ControlsForm.tsx +1 -1
  21. package/src/features/ChatInput/ActionBar/Model/GPT51ReasoningEffortSlider.tsx +9 -54
  22. package/src/features/ChatInput/ActionBar/Model/GPT52ProReasoningEffortSlider.tsx +9 -53
  23. package/src/features/ChatInput/ActionBar/Model/GPT52ReasoningEffortSlider.tsx +9 -55
  24. package/src/features/ChatInput/ActionBar/Model/GPT5ReasoningEffortSlider.tsx +9 -54
  25. package/src/features/ChatInput/ActionBar/Model/ImageAspectRatioSelect.tsx +50 -16
  26. package/src/features/ChatInput/ActionBar/Model/ImageResolutionSlider.tsx +7 -53
  27. package/src/features/ChatInput/ActionBar/Model/LevelSlider.tsx +92 -0
  28. package/src/features/ChatInput/ActionBar/Model/ReasoningEffortSlider.tsx +9 -53
  29. package/src/features/ChatInput/ActionBar/Model/TextVerbositySlider.tsx +9 -53
  30. package/src/features/ChatInput/ActionBar/Model/ThinkingLevel2Slider.tsx +9 -52
  31. package/src/features/ChatInput/ActionBar/Model/ThinkingLevelSlider.tsx +9 -54
  32. package/src/features/ChatInput/ActionBar/Model/ThinkingSlider.tsx +20 -56
  33. package/src/features/ChatInput/ActionBar/Model/__tests__/createLevelSlider.test.tsx +126 -0
  34. package/src/features/ChatInput/ActionBar/Model/createLevelSlider.tsx +105 -0
  35. package/src/locales/default/chat.ts +1 -0
  36. package/src/locales/default/modelProvider.ts +38 -0
package/packages/model-runtime/src/utils/modelParse.test.ts
@@ -94,6 +94,16 @@ const mockDefaultModelList: (Partial<ChatModelCard> & { id: string })[] = [
     enabled: false,
     id: 'model-known-disabled',
   },
+  {
+    displayName: 'Known Model With Settings',
+    enabled: true,
+    id: 'model-known-settings',
+    settings: {
+      extendParams: ['enableReasoning'],
+      searchImpl: 'params',
+      searchProvider: 'builtin',
+    },
+  },
 ];
 
 // Mock the import
@@ -242,7 +252,11 @@ describe('modelParse', () => {
   describe('search and imageOutput (processModelList)', () => {
     it('openai: default search keywords should make "*-search" models support search', async () => {
       // openai config does not define searchKeywords, so DEFAULT_SEARCH_KEYWORDS ['-search'] applies
-      const out = await processModelList([{ id: 'gpt-4o-search' }], MODEL_LIST_CONFIGS.openai, 'openai');
+      const out = await processModelList(
+        [{ id: 'gpt-4o-search' }],
+        MODEL_LIST_CONFIGS.openai,
+        'openai',
+      );
       expect(out).toHaveLength(1);
       expect(out[0].search).toBe(true);
     });
@@ -276,7 +290,11 @@ describe('modelParse', () => {
     });
 
     it('google: gemini-* without "-image-" should not infer imageOutput and get search=true via known google model', async () => {
-      const out = await processModelList([{ id: 'gemini-2.5-pro' }], MODEL_LIST_CONFIGS.google, 'google');
+      const out = await processModelList(
+        [{ id: 'gemini-2.5-pro' }],
+        MODEL_LIST_CONFIGS.google,
+        'google',
+      );
       expect(out).toHaveLength(1);
       expect(out[0].displayName).toBe('Gemini 2.5 Pro');
       expect(out[0].search).toBe(true);
@@ -354,6 +372,36 @@ describe('modelParse', () => {
         expect(result.find((m) => m.id === 'model-known-disabled')!.enabled).toBe(false);
         expect(result.find((m) => m.id === 'unknown-model-for-enabled-test')!.enabled).toBe(false);
       });
+
+      it('should include settings from known model when remote data does not provide them', async () => {
+        const modelList = [{ id: 'model-known-settings' }];
+        const result = await processModelList(modelList, config);
+
+        const settings = result[0].settings;
+        expect(settings).toBeDefined();
+        expect(settings?.extendParams).toEqual(['enableReasoning']);
+        expect(settings?.searchImpl).toBe('params');
+        expect(settings?.searchProvider).toBe('builtin');
+      });
+
+      it('should merge extendParams from known and remote models while preserving uniqueness', async () => {
+        const modelList = [
+          {
+            id: 'model-known-settings',
+            settings: {
+              extendParams: ['reasoningBudgetToken', 'enableReasoning'],
+              searchImpl: 'tool',
+            },
+          },
+        ];
+
+        const result = await processModelList(modelList, config);
+        const settings = result[0].settings;
+
+        expect(settings?.extendParams).toEqual(['enableReasoning', 'reasoningBudgetToken']);
+        expect(settings?.searchImpl).toBe('tool');
+        expect(settings?.searchProvider).toBe('builtin');
+      });
     });
   });
 
@@ -497,7 +545,7 @@ describe('modelParse', () => {
     });
 
     it('default search keywords should make "*-search" models support search', async () => {
-      const out = await processMultiProviderModelList([{ id: 'gpt-4o-search'}]);
+      const out = await processMultiProviderModelList([{ id: 'gpt-4o-search' }]);
       expect(out).toHaveLength(1);
       expect(out[0].search).toBe(true);
     });
@@ -674,6 +722,140 @@ describe('modelParse', () => {
       expect(glm.vision).toBe(true);
     });
 
+    it('should include known extendParams for OpenAI and Google providers regardless of providerid', async () => {
+      const mockModule = await import('model-bank');
+      mockModule.LOBE_DEFAULT_MODEL_LIST.push(
+        {
+          id: 'gpt-openai-extend-restricted',
+          displayName: 'OpenAI Extend Restricted',
+          settings: {
+            extendParams: ['openaiParam'],
+          },
+        } as any,
+        {
+          id: 'gemini-extend-restricted',
+          displayName: 'Gemini Extend Restricted',
+          settings: {
+            extendParams: ['thinkingBudget', 'urlContext'],
+          },
+        } as any,
+      );
+
+      const modelList = [
+        { id: 'gpt-openai-extend-restricted' },
+        { id: 'gemini-extend-restricted' },
+      ];
+
+      const result = await processMultiProviderModelList(modelList, 'vercelaigateway');
+
+      const openaiModel = result.find((m) => m.id === 'gpt-openai-extend-restricted');
+      const googleModel = result.find((m) => m.id === 'gemini-extend-restricted');
+
+      // Both OpenAI and Google providers always include known extendParams
+      expect(openaiModel?.settings?.extendParams).toEqual(['openaiParam']);
+      expect(googleModel?.settings?.extendParams).toEqual(['thinkingBudget', 'urlContext']);
+    });
+
+    it('should allow known extendParams for non-OpenAI providers when provider is aihubmix', async () => {
+      const mockModule = await import('model-bank');
+      mockModule.LOBE_DEFAULT_MODEL_LIST.push({
+        id: 'gemini-extend-aihubmix',
+        displayName: 'Gemini Extend Aihubmix',
+        settings: {
+          extendParams: ['thinkingBudget', 'urlContext'],
+        },
+      } as any);
+
+      const modelList = [{ id: 'gemini-extend-aihubmix' }];
+
+      const result = await processMultiProviderModelList(modelList, 'aihubmix');
+
+      const googleModel = result.find((m) => m.id === 'gemini-extend-aihubmix');
+
+      expect(googleModel?.settings?.extendParams).toEqual(['thinkingBudget', 'urlContext']);
+    });
+
+    it('should omit search settings when provider is neither aihubmix nor newapi', async () => {
+      const mockModule = await import('model-bank');
+      const initialLength = mockModule.LOBE_DEFAULT_MODEL_LIST.length;
+      mockModule.LOBE_DEFAULT_MODEL_LIST.push({
+        id: 'search-settings-model',
+        displayName: 'Search Settings Model',
+        settings: {
+          searchImpl: 'params',
+          searchProvider: 'builtin',
+        },
+      } as any);
+
+      try {
+        const result = await processMultiProviderModelList(
+          [{ id: 'search-settings-model' }],
+          'vercelaigateway',
+        );
+
+        const model = result.find((m) => m.id === 'search-settings-model');
+
+        expect(model?.settings?.searchImpl).toBeUndefined();
+        expect(model?.settings?.searchProvider).toBeUndefined();
+      } finally {
+        mockModule.LOBE_DEFAULT_MODEL_LIST.splice(initialLength);
+      }
+    });
+
+    it('should include search settings when provider is aihubmix', async () => {
+      const mockModule = await import('model-bank');
+      const initialLength = mockModule.LOBE_DEFAULT_MODEL_LIST.length;
+      mockModule.LOBE_DEFAULT_MODEL_LIST.push({
+        id: 'search-settings-aihubmix',
+        displayName: 'Search Settings Aihubmix',
+        settings: {
+          searchImpl: 'params',
+          searchProvider: 'builtin',
+        },
+      } as any);
+
+      try {
+        const result = await processMultiProviderModelList(
+          [{ id: 'search-settings-aihubmix' }],
+          'aihubmix',
+        );
+
+        const model = result.find((m) => m.id === 'search-settings-aihubmix');
+
+        expect(model?.settings?.searchImpl).toBe('params');
+        expect(model?.settings?.searchProvider).toBe('builtin');
+      } finally {
+        mockModule.LOBE_DEFAULT_MODEL_LIST.splice(initialLength);
+      }
+    });
+
+    it('should include search settings when provider is newapi', async () => {
+      const mockModule = await import('model-bank');
+      const initialLength = mockModule.LOBE_DEFAULT_MODEL_LIST.length;
+      mockModule.LOBE_DEFAULT_MODEL_LIST.push({
+        id: 'search-settings-newapi',
+        displayName: 'Search Settings NewAPI',
+        settings: {
+          searchImpl: 'params',
+          searchProvider: 'builtin',
+        },
+      } as any);
+
+      try {
+        const result = await processMultiProviderModelList(
+          [{ id: 'search-settings-newapi' }],
+          'newapi',
+        );
+
+        const model = result.find((m) => m.id === 'search-settings-newapi');
+
+        expect(model?.settings?.searchImpl).toBe('params');
+        expect(model?.settings?.searchProvider).toBe('builtin');
+      } finally {
+        mockModule.LOBE_DEFAULT_MODEL_LIST.splice(initialLength);
+      }
+    });
+
     it('should correctly handle models with excluded keywords in different providers', async () => {
       // OpenAI excludes 'audio', other providers don't have excluded keywords
       const modelList = [
package/packages/model-runtime/src/utils/modelParse.ts
@@ -1,5 +1,6 @@
 import type { ChatModelCard } from '@lobechat/types';
 import { AIBaseModelCard } from 'model-bank';
+import type { AiModelSettings, ExtendParamsType } from 'model-bank';
 
 import type { ModelProviderKey } from '../types';
 
@@ -344,6 +345,75 @@ const processDisplayName = (displayName: string): string => {
   return displayName;
 };
 
+const mergeExtendParams = (
+  modelExtendParams?: ReadonlyArray<ExtendParamsType>,
+  knownExtendParams?: ReadonlyArray<ExtendParamsType>,
+  options?: { includeKnownExtendParams?: boolean },
+): ExtendParamsType[] | undefined => {
+  const includeKnown = options?.includeKnownExtendParams ?? true;
+
+  const combined = [
+    ...(includeKnown ? (knownExtendParams ?? []) : []),
+    ...(modelExtendParams ?? []),
+  ];
+
+  if (combined.length === 0) return undefined;
+
+  return Array.from(new Set(combined));
+};
+
+const mergeSettings = (
+  modelSettings?: AiModelSettings,
+  knownSettings?: AiModelSettings,
+  options?: { includeKnownExtendParams?: boolean; includeSearchSettings?: boolean },
+): AiModelSettings | undefined => {
+  if (!modelSettings && !knownSettings) return undefined;
+
+  const merged: AiModelSettings = {};
+
+  if (knownSettings) {
+    Object.assign(merged, knownSettings);
+  }
+
+  if (modelSettings) {
+    Object.assign(merged, modelSettings);
+  }
+
+  const extendParams = mergeExtendParams(
+    modelSettings?.extendParams,
+    knownSettings?.extendParams,
+    options,
+  );
+  if (extendParams) {
+    merged.extendParams = extendParams;
+  } else {
+    delete merged.extendParams;
+  }
+
+  const includeSearchSettings = options?.includeSearchSettings ?? true;
+
+  if (includeSearchSettings) {
+    const searchImpl = modelSettings?.searchImpl ?? knownSettings?.searchImpl;
+    if (searchImpl) {
+      merged.searchImpl = searchImpl;
+    } else {
+      delete merged.searchImpl;
+    }
+
+    const searchProvider = modelSettings?.searchProvider ?? knownSettings?.searchProvider;
+    if (searchProvider) {
+      merged.searchProvider = searchProvider;
+    } else {
+      delete merged.searchProvider;
+    }
+  } else {
+    delete merged.searchImpl;
+    delete merged.searchProvider;
+  }
+
+  return Object.keys(merged).length > 0 ? merged : undefined;
+};
+
 /**
  * Get the local configuration of the model provider
  * @param provider Model provider
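
For reference, a minimal usage sketch of the merge semantics these helpers implement — not part of the diff; it assumes the module-private mergeSettings above were callable directly and passes plain object literals in place of model-bank's AiModelSettings:

// Known settings act as the base and remote values win field-by-field.
const known = {
  extendParams: ['thinkingBudget', 'urlContext'],
  searchImpl: 'params',
  searchProvider: 'builtin',
};
const remote = { extendParams: ['urlContext', 'reasoningBudgetToken'], searchImpl: 'tool' };

// extendParams are unioned (known entries first, duplicates dropped); the remote
// searchImpl overrides the known one while the known searchProvider is kept:
// => { extendParams: ['thinkingBudget', 'urlContext', 'reasoningBudgetToken'],
//      searchImpl: 'tool', searchProvider: 'builtin' }
mergeSettings(remote as any, known as any);

// With includeSearchSettings: false, searchImpl and searchProvider are stripped entirely.
mergeSettings(remote as any, known as any, { includeSearchSettings: false });

The known-first ordering is what the test above relies on when it expects ['enableReasoning', 'reasoningBudgetToken'] after merging a known ['enableReasoning'] with a remote ['reasoningBudgetToken', 'enableReasoning'].
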
@@ -389,6 +459,7 @@ const processModelCard = (
   model: { [key: string]: any; id: string },
   config: ModelProcessorConfig,
   knownModel?: any,
+  options?: { includeKnownExtendParams?: boolean; includeSearchSettings?: boolean },
 ): ChatModelCard | undefined => {
   const {
     functionCallKeywords = [],
@@ -421,6 +492,8 @@
     return undefined;
   }
 
+  const mergedSettings = mergeSettings(model.settings, knownModel?.settings, options);
+
   const formatPricing = (pricing?: {
     cachedInput?: number;
     input?: number;
@@ -510,6 +583,7 @@
     ...(modelType === 'image' && {
       parameters: model.parameters ?? knownModel?.parameters,
     }),
+    ...(mergedSettings ? { settings: mergedSettings } : {}),
     video:
       model.video ??
       knownModel?.abilities?.video ??
@@ -606,13 +680,46 @@ export const processMultiProviderModelList = async (
       );
     }
 
+    const includeKnownExtendParams =
+      providerid === 'aihubmix' ||
+      providerid === 'newapi' ||
+      detectedProvider === 'openai' ||
+      detectedProvider === 'google';
+    const includeSearchSettings = providerid === 'aihubmix' || providerid === 'newapi';
+
     // If providerid is provided and has local configuration, try to get the model's enabled status from it
     const providerLocalModelConfig = getModelLocalEnableConfig(
       providerLocalConfig as any[],
       model,
     );
 
-    const processedModel = processModelCard(model, config, knownModel);
+    const processedModel = processModelCard(model, config, knownModel, {
+      includeKnownExtendParams,
+      includeSearchSettings,
+    });
+
+    if (processedModel && includeSearchSettings && providerLocalModelConfig?.settings) {
+      const localSettings = providerLocalModelConfig.settings as AiModelSettings | undefined;
+      const searchImpl = localSettings?.searchImpl;
+      const searchProvider = localSettings?.searchProvider;
+
+      if (searchImpl || searchProvider) {
+        const updatedSettings: AiModelSettings = processedModel.settings
+          ? { ...processedModel.settings }
+          : ({} as AiModelSettings);
+
+        if (searchImpl) {
+          updatedSettings.searchImpl = searchImpl;
+        }
+
+        if (searchProvider) {
+          updatedSettings.searchProvider = searchProvider;
+        }
+
+        processedModel.settings =
+          Object.keys(updatedSettings).length > 0 ? updatedSettings : undefined;
+      }
+    }
 
     // If model is found in local configuration, use its enabled status
     if (
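
The gating above can be read as two small predicates — a sketch only, with hypothetical helper names; the diff inlines these expressions:

// providerid is the gateway the model list was fetched from; detectedProvider is the
// upstream vendor inferred per model id.
const shouldIncludeKnownExtendParams = (providerid?: string, detectedProvider?: string) =>
  providerid === 'aihubmix' ||
  providerid === 'newapi' ||
  detectedProvider === 'openai' ||
  detectedProvider === 'google';

const shouldIncludeSearchSettings = (providerid?: string) =>
  providerid === 'aihubmix' || providerid === 'newapi';

// e.g. a gemini-* model served through vercelaigateway keeps its known extendParams
// (Google-detected) but drops searchImpl/searchProvider, matching the tests above:
shouldIncludeKnownExtendParams('vercelaigateway', 'google'); // true
shouldIncludeSearchSettings('vercelaigateway'); // false
shouldIncludeSearchSettings('aihubmix'); // true
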
package/packages/types/src/llm.ts
@@ -1,4 +1,4 @@
-import { AiModelType, ModelParamsSchema, Pricing } from 'model-bank';
+import { AiModelSettings, AiModelType, ModelParamsSchema, Pricing } from 'model-bank';
 import { ReactNode } from 'react';
 
 import { AiProviderSettings } from './aiProvider';
@@ -64,6 +64,8 @@ export interface ChatModelCard {
    */
   search?: boolean;
 
+  settings?: AiModelSettings;
+
   type?: AiModelType;
 
   /**
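
To illustrate the new optional field, a small sketch of a card carrying merged settings — not part of the diff, typed the same way as the test fixture above and reusing the 'model-known-settings' values:

import type { ChatModelCard } from '@lobechat/types';

// The settings block is what mergeSettings now attaches to a processed card.
const card: Partial<ChatModelCard> & { id: string } = {
  displayName: 'Known Model With Settings',
  enabled: true,
  id: 'model-known-settings',
  settings: {
    extendParams: ['enableReasoning'],
    searchImpl: 'params',
    searchProvider: 'builtin',
  },
};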