@lobehub/chat 0.147.7 → 0.147.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/CHANGELOG.md +25 -0
  2. package/README.md +2 -2
  3. package/package.json +1 -1
  4. package/src/app/api/config/__snapshots__/route.test.ts.snap +45 -43
  5. package/src/app/api/config/parseDefaultAgent.test.ts +1 -1
  6. package/src/app/api/config/route.test.ts +2 -10
  7. package/src/app/api/config/route.ts +37 -16
  8. package/src/app/api/openai/createBizOpenAI/index.ts +3 -13
  9. package/src/app/api/openai/images/route.ts +1 -1
  10. package/src/app/api/openai/stt/route.ts +1 -1
  11. package/src/app/api/openai/tts/route.ts +1 -1
  12. package/src/app/chat/(desktop)/features/ChatHeader/Tags.tsx +1 -1
  13. package/src/app/chat/(desktop)/features/ChatInput/Footer/index.tsx +1 -1
  14. package/src/app/settings/llm/Azure/index.tsx +2 -2
  15. package/src/app/settings/llm/components/ProviderModelList/ModelFetcher.tsx +7 -3
  16. package/src/app/settings/llm/components/ProviderModelList/Option.tsx +4 -1
  17. package/src/app/settings/llm/components/ProviderModelList/index.tsx +4 -4
  18. package/src/config/__tests__/server.test.ts +0 -12
  19. package/src/config/server/provider.ts +12 -8
  20. package/src/features/AgentSetting/AgentConfig/ModelSelect.tsx +5 -2
  21. package/src/features/AgentSetting/AgentPrompt/TokenTag.tsx +1 -1
  22. package/src/features/ChatInput/ActionBar/FileUpload.tsx +2 -2
  23. package/src/features/ChatInput/ActionBar/Token/TokenTag.tsx +1 -1
  24. package/src/features/ChatInput/ActionBar/Token/index.tsx +1 -1
  25. package/src/features/ChatInput/ActionBar/Tools/index.tsx +1 -1
  26. package/src/features/ChatInput/useChatInput.ts +1 -1
  27. package/src/features/Conversation/Error/APIKeyForm/ProviderApiKeyForm.tsx +3 -3
  28. package/src/features/ModelSwitchPanel/index.tsx +5 -2
  29. package/src/migrations/FromV3ToV4/fixtures/openai-output-v4.json +1 -3
  30. package/src/migrations/FromV3ToV4/fixtures/openrouter-output-v4.json +2 -6
  31. package/src/migrations/FromV3ToV4/index.ts +8 -2
  32. package/src/services/_auth.ts +1 -3
  33. package/src/services/chat.ts +4 -5
  34. package/src/store/global/slices/settings/actions/llm.test.ts +2 -2
  35. package/src/store/global/slices/settings/actions/llm.ts +3 -3
  36. package/src/store/global/slices/settings/selectors/modelConfig.test.ts +0 -88
  37. package/src/store/global/slices/settings/selectors/modelConfig.ts +17 -81
  38. package/src/store/global/slices/settings/selectors/modelProvider.test.ts +99 -15
  39. package/src/store/global/slices/settings/selectors/modelProvider.ts +94 -30
  40. package/src/store/global/slices/settings/selectors/settings.ts +7 -1
  41. package/src/types/serverConfig.ts +1 -0
  42. package/src/types/settings/modelProvider.ts +0 -2
  43. package/src/utils/parseModels.test.ts +146 -0
  44. package/src/utils/parseModels.ts +67 -15
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 0.147.8](https://github.com/lobehub/lobe-chat/compare/v0.147.7...v0.147.8)
6
+
7
+ <sup>Released on **2024-04-12**</sup>
8
+
9
+ #### ♻ Code Refactoring
10
+
11
+ - **misc**: Update README.md.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Code refactoring
19
+
20
+ - **misc**: Update README.md ([44b5a23](https://github.com/lobehub/lobe-chat/commit/44b5a23))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
5
30
  ### [Version 0.147.7](https://github.com/lobehub/lobe-chat/compare/v0.147.6...v0.147.7)
6
31
 
7
32
  <sup>Released on **2024-04-12**</sup>
package/README.md CHANGED
@@ -123,12 +123,12 @@ We have implemented support for the following model service providers:
123
123
  - **AWS Bedrock**: Integrated with AWS Bedrock service, supporting models such as **Claude / LLama2**, providing powerful natural language processing capabilities. [Learn more](https://aws.amazon.com/cn/bedrock)
124
124
  - **Anthropic (Claude)**: Accessed Anthropic's **Claude** series models, including Claude 3 and Claude 2, with breakthroughs in multi-modal capabilities and extended context, setting a new industry benchmark. [Learn more](https://www.anthropic.com/claude)
125
125
  - **Google AI (Gemini Pro, Gemini Vision)**: Access to Google's **Gemini** series models, including Gemini and Gemini Pro, to support advanced language understanding and generation. [Learn more](https://deepmind.google/technologies/gemini/)
126
- - **ChatGLM**: Added the **ChatGLM** series models from Zhipuai (GLM-4/GLM-4-vision/GLM-3-turbo), providing users with another efficient conversation model choice. [Learn more](https://www.zhipuai.cn/)
127
- - **Moonshot AI (Dark Side of the Moon)**: Integrated with the Moonshot series models, an innovative AI startup from China, aiming to provide deeper conversation understanding. [Learn more](https://www.moonshot.cn/)
128
126
  - **Groq**: Accessed Groq's AI models, efficiently processing message sequences and generating responses, capable of multi-turn dialogues and single-interaction tasks. [Learn more](https://groq.com/)
129
127
  - **OpenRouter**: Supports routing of models including **Claude 3**, **Gemma**, **Mistral**, **Llama2** and **Cohere**, with intelligent routing optimization to improve usage efficiency, open and flexible. [Learn more](https://openrouter.ai/)
130
128
  - **01.AI (Yi Model)**: Integrated the 01.AI models, with series of APIs featuring fast inference speed, which not only shortened the processing time, but also maintained excellent model performance. [Learn more](https://01.ai/)
131
129
  - **Together.ai**: Over 100 leading open-source Chat, Language, Image, Code, and Embedding models are available through the Together Inference API. For these models you pay just for what you use. [Learn more](https://www.together.ai/)
130
+ - **ChatGLM**: Added the **ChatGLM** series models from Zhipuai (GLM-4/GLM-4-vision/GLM-3-turbo), providing users with another efficient conversation model choice. [Learn more](https://www.zhipuai.cn/)
131
+ - **Moonshot AI (Dark Side of the Moon)**: Integrated with the Moonshot series models, an innovative AI startup from China, aiming to provide deeper conversation understanding. [Learn more](https://www.moonshot.cn/)
132
132
 
133
133
  At the same time, we are also planning to support more model service providers, such as Replicate and Perplexity, to further enrich our service provider library. If you would like LobeChat to support your favorite service provider, feel free to join our [community discussion](https://github.com/lobehub/lobe-chat/discussions/1284).
134
134
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.147.7",
3
+ "version": "0.147.8",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -5,16 +5,12 @@ exports[`GET /api/config > Model Provider env > CUSTOM_MODELS > custom deletion,
5
5
  {
6
6
  "displayName": "llama",
7
7
  "enabled": true,
8
- "functionCall": true,
9
8
  "id": "llama",
10
- "vision": true,
11
9
  },
12
10
  {
13
11
  "displayName": "claude-2",
14
12
  "enabled": true,
15
- "functionCall": true,
16
13
  "id": "claude-2",
17
- "vision": true,
18
14
  },
19
15
  {
20
16
  "displayName": "gpt-4-32k",
@@ -27,29 +23,32 @@ exports[`GET /api/config > Model Provider env > CUSTOM_MODELS > custom deletion,
27
23
  `;
28
24
 
29
25
  exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
30
- [
31
- {
32
- "displayName": "llama",
33
- "enabled": true,
34
- "functionCall": true,
35
- "id": "llama",
36
- "vision": true,
37
- },
38
- {
39
- "displayName": "claude-2",
40
- "enabled": true,
41
- "functionCall": true,
42
- "id": "claude-2",
43
- "vision": true,
44
- },
45
- {
46
- "displayName": "gpt-4-32k",
47
- "enabled": true,
48
- "functionCall": true,
49
- "id": "gpt-4-0125-preview",
50
- "tokens": 128000,
51
- },
52
- ]
26
+ {
27
+ "enabledModels": [
28
+ "llama",
29
+ "claude-2",
30
+ "gpt-4-0125-preview",
31
+ ],
32
+ "serverModelCards": [
33
+ {
34
+ "displayName": "llama",
35
+ "enabled": true,
36
+ "id": "llama",
37
+ },
38
+ {
39
+ "displayName": "claude-2",
40
+ "enabled": true,
41
+ "id": "claude-2",
42
+ },
43
+ {
44
+ "displayName": "gpt-4-32k",
45
+ "enabled": true,
46
+ "functionCall": true,
47
+ "id": "gpt-4-0125-preview",
48
+ "tokens": 128000,
49
+ },
50
+ ],
51
+ }
53
52
  `;
54
53
 
55
54
  exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = `
@@ -108,20 +107,23 @@ exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > should work
108
107
  `;
109
108
 
110
109
  exports[`GET /api/config > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = `
111
- [
112
- {
113
- "displayName": "google/gemma-7b-it",
114
- "enabled": true,
115
- "functionCall": true,
116
- "id": "google/gemma-7b-it",
117
- "vision": true,
118
- },
119
- {
120
- "displayName": "Mistral-7B-Instruct",
121
- "enabled": true,
122
- "functionCall": true,
123
- "id": "mistralai/mistral-7b-instruct",
124
- "vision": true,
125
- },
126
- ]
110
+ {
111
+ "enabled": false,
112
+ "enabledModels": [
113
+ "google/gemma-7b-it",
114
+ "mistralai/mistral-7b-instruct",
115
+ ],
116
+ "serverModelCards": [
117
+ {
118
+ "displayName": "google/gemma-7b-it",
119
+ "enabled": true,
120
+ "id": "google/gemma-7b-it",
121
+ },
122
+ {
123
+ "displayName": "Mistral-7B-Instruct",
124
+ "enabled": true,
125
+ "id": "mistralai/mistral-7b-instruct",
126
+ },
127
+ ],
128
+ }
127
129
  `;
@@ -87,7 +87,7 @@ describe('parseAgentConfig', () => {
87
87
  });
88
88
 
89
89
  describe('complex environment', () => {
90
- it.skip('parses a complete environment variable string correctly', () => {
90
+ it('parses environment variable string correctly', () => {
91
91
  const envStr =
92
92
  'model=gpt-4-1106-preview;params.max_tokens=300;plugins=search-engine,lobe-image-designer';
93
93
  const expected = {
@@ -23,7 +23,7 @@ describe('GET /api/config', () => {
23
23
 
24
24
  const jsonResponse: GlobalServerConfig = await response.json();
25
25
 
26
- const result = jsonResponse.languageModel?.openai?.serverModelCards;
26
+ const result = jsonResponse.languageModel?.openai;
27
27
 
28
28
  expect(result).toMatchSnapshot();
29
29
  process.env.OPENAI_MODEL_LIST = '';
@@ -101,31 +101,23 @@ describe('GET /api/config', () => {
101
101
 
102
102
  expect(result).toContainEqual({
103
103
  displayName: 'model1',
104
- functionCall: true,
105
104
  id: 'model1',
106
105
  enabled: true,
107
- vision: true,
108
106
  });
109
107
  expect(result).toContainEqual({
110
108
  displayName: 'model2',
111
- functionCall: true,
112
109
  enabled: true,
113
110
  id: 'model2',
114
- vision: true,
115
111
  });
116
112
  expect(result).toContainEqual({
117
113
  displayName: 'model3',
118
114
  enabled: true,
119
- functionCall: true,
120
115
  id: 'model3',
121
- vision: true,
122
116
  });
123
117
  expect(result).toContainEqual({
124
118
  displayName: 'model4',
125
- functionCall: true,
126
119
  enabled: true,
127
120
  id: 'model4',
128
- vision: true,
129
121
  });
130
122
 
131
123
  process.env.OPENAI_MODEL_LIST = '';
@@ -159,7 +151,7 @@ describe('GET /api/config', () => {
159
151
  const res = await GET();
160
152
  const data: GlobalServerConfig = await res.json();
161
153
 
162
- const result = data.languageModel?.openrouter?.serverModelCards;
154
+ const result = data.languageModel?.openrouter;
163
155
 
164
156
  expect(result).toMatchSnapshot();
165
157
 
@@ -1,11 +1,12 @@
1
1
  import {
2
2
  OllamaProviderCard,
3
+ OpenAIProviderCard,
3
4
  OpenRouterProviderCard,
4
5
  TogetherAIProviderCard,
5
6
  } from '@/config/modelProviders';
6
7
  import { getServerConfig } from '@/config/server';
7
8
  import { GlobalServerConfig } from '@/types/serverConfig';
8
- import { transformToChatModelCards } from '@/utils/parseModels';
9
+ import { extractEnabledModels, transformToChatModelCards } from '@/utils/parseModels';
9
10
 
10
11
  import { parseAgentConfig } from './parseDefaultAgent';
11
12
 
@@ -31,6 +32,9 @@ export const GET = async () => {
31
32
  ENABLED_ANTHROPIC,
32
33
  ENABLED_MISTRAL,
33
34
 
35
+ ENABLED_AZURE_OPENAI,
36
+ AZURE_MODEL_LIST,
37
+
34
38
  ENABLE_OLLAMA,
35
39
  OLLAMA_MODEL_LIST,
36
40
 
@@ -49,38 +53,55 @@ export const GET = async () => {
49
53
 
50
54
  enabledOAuthSSO: ENABLE_OAUTH_SSO,
51
55
  languageModel: {
52
- anthropic: { enabled: ENABLED_ANTHROPIC },
56
+ anthropic: {
57
+ enabled: ENABLED_ANTHROPIC,
58
+ },
59
+ azure: {
60
+ enabled: ENABLED_AZURE_OPENAI,
61
+ enabledModels: extractEnabledModels(AZURE_MODEL_LIST, true),
62
+ serverModelCards: transformToChatModelCards({
63
+ defaultChatModels: [],
64
+ modelString: AZURE_MODEL_LIST,
65
+ withDeploymentName: true,
66
+ }),
67
+ },
53
68
  bedrock: { enabled: ENABLED_AWS_BEDROCK },
54
69
  google: { enabled: ENABLED_GOOGLE },
55
70
  groq: { enabled: ENABLED_GROQ },
56
71
  mistral: { enabled: ENABLED_MISTRAL },
57
72
  moonshot: { enabled: ENABLED_MOONSHOT },
58
-
59
73
  ollama: {
60
74
  enabled: ENABLE_OLLAMA,
61
- serverModelCards: transformToChatModelCards(
62
- OLLAMA_MODEL_LIST,
63
- OllamaProviderCard.chatModels,
64
- ),
75
+ serverModelCards: transformToChatModelCards({
76
+ defaultChatModels: OllamaProviderCard.chatModels,
77
+ modelString: OLLAMA_MODEL_LIST,
78
+ }),
65
79
  },
66
80
  openai: {
67
- serverModelCards: transformToChatModelCards(OPENAI_MODEL_LIST),
81
+ enabledModels: extractEnabledModels(OPENAI_MODEL_LIST),
82
+ serverModelCards: transformToChatModelCards({
83
+ defaultChatModels: OpenAIProviderCard.chatModels,
84
+ modelString: OPENAI_MODEL_LIST,
85
+ }),
68
86
  },
87
+
69
88
  openrouter: {
70
89
  enabled: ENABLED_OPENROUTER,
71
- serverModelCards: transformToChatModelCards(
72
- OPENROUTER_MODEL_LIST,
73
- OpenRouterProviderCard.chatModels,
74
- ),
90
+ enabledModels: extractEnabledModels(OPENROUTER_MODEL_LIST),
91
+ serverModelCards: transformToChatModelCards({
92
+ defaultChatModels: OpenRouterProviderCard.chatModels,
93
+ modelString: OPENROUTER_MODEL_LIST,
94
+ }),
75
95
  },
76
96
  perplexity: { enabled: ENABLED_PERPLEXITY },
77
97
 
78
98
  togetherai: {
79
99
  enabled: ENABLED_TOGETHERAI,
80
- serverModelCards: transformToChatModelCards(
81
- TOGETHERAI_MODEL_LIST,
82
- TogetherAIProviderCard.chatModels,
83
- ),
100
+ enabledModels: extractEnabledModels(TOGETHERAI_MODEL_LIST),
101
+ serverModelCards: transformToChatModelCards({
102
+ defaultChatModels: TogetherAIProviderCard.chatModels,
103
+ modelString: TOGETHERAI_MODEL_LIST,
104
+ }),
84
105
  },
85
106
 
86
107
  zeroone: { enabled: ENABLED_ZEROONE },
@@ -1,21 +1,18 @@
1
1
  import OpenAI from 'openai';
2
2
 
3
3
  import { checkAuth } from '@/app/api/auth';
4
- import { getServerConfig } from '@/config/server';
5
4
  import { getOpenAIAuthFromRequest } from '@/const/fetch';
6
5
  import { ChatErrorType, ErrorType } from '@/types/fetch';
7
6
 
8
7
  import { createErrorResponse } from '../../errorResponse';
9
- import { createAzureOpenai } from './createAzureOpenai';
10
8
  import { createOpenai } from './createOpenai';
11
9
 
12
10
  /**
13
11
  * createOpenAI Instance with Auth and azure openai support
14
12
  * if auth not pass ,just return error response
15
13
  */
16
- export const createBizOpenAI = (req: Request, model: string): Response | OpenAI => {
17
- const { apiKey, accessCode, endpoint, useAzure, apiVersion, oauthAuthorized } =
18
- getOpenAIAuthFromRequest(req);
14
+ export const createBizOpenAI = (req: Request): Response | OpenAI => {
15
+ const { apiKey, accessCode, endpoint, oauthAuthorized } = getOpenAIAuthFromRequest(req);
19
16
 
20
17
  const result = checkAuth({ accessCode, apiKey, oauthAuthorized });
21
18
 
@@ -25,15 +22,8 @@ export const createBizOpenAI = (req: Request, model: string): Response | OpenAI
25
22
 
26
23
  let openai: OpenAI;
27
24
 
28
- const { USE_AZURE_OPENAI } = getServerConfig();
29
- const useAzureOpenAI = useAzure || USE_AZURE_OPENAI;
30
-
31
25
  try {
32
- if (useAzureOpenAI) {
33
- openai = createAzureOpenai({ apiVersion, endpoint, model, userApiKey: apiKey });
34
- } else {
35
- openai = createOpenai(apiKey, endpoint);
36
- }
26
+ openai = createOpenai(apiKey, endpoint);
37
27
  } catch (error) {
38
28
  if ((error as Error).cause === ChatErrorType.NoOpenAIAPIKey) {
39
29
  return createErrorResponse(ChatErrorType.NoOpenAIAPIKey);
@@ -8,7 +8,7 @@ export const runtime = 'edge';
8
8
  export const POST = async (req: Request) => {
9
9
  const payload = (await req.json()) as OpenAIImagePayload;
10
10
 
11
- const openaiOrErrResponse = createBizOpenAI(req, payload.model);
11
+ const openaiOrErrResponse = createBizOpenAI(req);
12
12
  // if resOrOpenAI is a Response, it means there is an error,just return it
13
13
  if (openaiOrErrResponse instanceof Response) return openaiOrErrResponse;
14
14
 
@@ -16,7 +16,7 @@ export const POST = async (req: Request) => {
16
16
  speech: speechBlob,
17
17
  } as OpenAISTTPayload;
18
18
 
19
- const openaiOrErrResponse = createBizOpenAI(req, payload.options.model);
19
+ const openaiOrErrResponse = createBizOpenAI(req);
20
20
 
21
21
  // if resOrOpenAI is a Response, it means there is an error,just return it
22
22
  if (openaiOrErrResponse instanceof Response) return openaiOrErrResponse;
@@ -10,7 +10,7 @@ export const preferredRegion = getPreferredRegion();
10
10
  export const POST = async (req: Request) => {
11
11
  const payload = (await req.json()) as OpenAITTSPayload;
12
12
 
13
- const openaiOrErrResponse = createBizOpenAI(req, payload.options.model);
13
+ const openaiOrErrResponse = createBizOpenAI(req);
14
14
 
15
15
  // if resOrOpenAI is a Response, it means there is an error,just return it
16
16
  if (openaiOrErrResponse instanceof Response) return openaiOrErrResponse;
@@ -16,7 +16,7 @@ const TitleTags = memo(() => {
16
16
  agentSelectors.currentAgentPlugins(s),
17
17
  ]);
18
18
 
19
- const showPlugin = useGlobalStore(modelProviderSelectors.modelEnabledFunctionCall(model));
19
+ const showPlugin = useGlobalStore(modelProviderSelectors.isModelEnabledFunctionCall(model));
20
20
 
21
21
  return (
22
22
  <Flexbox gap={8} horizontal>
@@ -71,7 +71,7 @@ const Footer = memo<{ setExpand?: (expand: boolean) => void }>(({ setExpand }) =
71
71
  ]);
72
72
 
73
73
  const model = useSessionStore(agentSelectors.currentAgentModel);
74
- const canUpload = useGlobalStore(modelProviderSelectors.modelEnabledUpload(model));
74
+ const canUpload = useGlobalStore(modelProviderSelectors.isModelEnabledUpload(model));
75
75
 
76
76
  const sendMessage = useSendMessage();
77
77
 
@@ -8,7 +8,7 @@ import { Flexbox } from 'react-layout-kit';
8
8
 
9
9
  import { ModelProvider } from '@/libs/agent-runtime';
10
10
  import { useGlobalStore } from '@/store/global';
11
- import { modelConfigSelectors } from '@/store/global/selectors';
11
+ import { modelProviderSelectors } from '@/store/global/selectors';
12
12
 
13
13
  import ProviderConfig from '../components/ProviderConfig';
14
14
  import { LLMProviderApiTokenKey, LLMProviderBaseUrlKey, LLMProviderConfigKey } from '../const';
@@ -34,7 +34,7 @@ const AzureOpenAIProvider = memo(() => {
34
34
 
35
35
  // Get the first model card's deployment name as the check model
36
36
  const checkModel = useGlobalStore((s) => {
37
- const chatModelCards = modelConfigSelectors.getModelCardsByProviderId(providerKey)(s);
37
+ const chatModelCards = modelProviderSelectors.getModelCardsById(providerKey)(s);
38
38
 
39
39
  if (chatModelCards.length > 0) {
40
40
  return chatModelCards[0].deploymentName;
@@ -8,7 +8,11 @@ import { useTranslation } from 'react-i18next';
8
8
  import { Flexbox } from 'react-layout-kit';
9
9
 
10
10
  import { useGlobalStore } from '@/store/global';
11
- import { modelConfigSelectors } from '@/store/global/selectors';
11
+ import {
12
+ modelConfigSelectors,
13
+ modelProviderSelectors,
14
+ settingsSelectors,
15
+ } from '@/store/global/selectors';
12
16
  import { GlobalLLMProviderKey } from '@/types/settings';
13
17
 
14
18
  const useStyles = createStyles(({ css, token }) => ({
@@ -38,10 +42,10 @@ const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
38
42
  ]);
39
43
  const enabledAutoFetch = useGlobalStore(modelConfigSelectors.isAutoFetchModelsEnabled(provider));
40
44
  const latestFetchTime = useGlobalStore(
41
- (s) => modelConfigSelectors.getConfigByProviderId(provider)(s)?.latestFetchTime,
45
+ (s) => settingsSelectors.providerConfig(provider)(s)?.latestFetchTime,
42
46
  );
43
47
  const totalModels = useGlobalStore(
44
- (s) => modelConfigSelectors.getModelCardsByProviderId(provider)(s).length,
48
+ (s) => modelProviderSelectors.getModelCardsById(provider)(s).length,
45
49
  );
46
50
 
47
51
  const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch);
@@ -13,7 +13,10 @@ import CustomModelOption from './CustomModelOption';
13
13
 
14
14
  const OptionRender = memo<{ displayName: string; id: string; provider: GlobalLLMProviderKey }>(
15
15
  ({ displayName, id, provider }) => {
16
- const model = useGlobalStore((s) => modelProviderSelectors.modelCardById(id)(s), isEqual);
16
+ const model = useGlobalStore(
17
+ (s) => modelProviderSelectors.getDefaultModelCardById(id)(s),
18
+ isEqual,
19
+ );
17
20
 
18
21
  // if there is no model, it means it is a user custom model
19
22
  if (!model) return <CustomModelOption id={id} provider={provider} />;
@@ -8,7 +8,7 @@ import { useTranslation } from 'react-i18next';
8
8
  import { Flexbox } from 'react-layout-kit';
9
9
 
10
10
  import { useGlobalStore } from '@/store/global';
11
- import { modelConfigSelectors, modelProviderSelectors } from '@/store/global/selectors';
11
+ import { modelProviderSelectors } from '@/store/global/selectors';
12
12
  import { GlobalLLMProviderKey } from '@/types/settings';
13
13
 
14
14
  import ModelConfigModal from './ModelConfigModal';
@@ -51,16 +51,16 @@ const ProviderModelListSelect = memo<CustomModelSelectProps>(
51
51
  ]);
52
52
 
53
53
  const chatModelCards = useGlobalStore(
54
- modelConfigSelectors.getModelCardsByProviderId(provider),
54
+ modelProviderSelectors.getModelCardsById(provider),
55
55
  isEqual,
56
56
  );
57
57
 
58
58
  const defaultEnableModel = useGlobalStore(
59
- modelProviderSelectors.defaultEnabledProviderModels(provider),
59
+ modelProviderSelectors.getDefaultEnabledModelsById(provider),
60
60
  isEqual,
61
61
  );
62
62
  const enabledModels = useGlobalStore(
63
- modelConfigSelectors.getEnableModelsByProviderId(provider),
63
+ modelProviderSelectors.getEnableModelsById(provider),
64
64
  isEqual,
65
65
  );
66
66
 
@@ -27,18 +27,6 @@ describe('getServerConfig', () => {
27
27
  global.process = originalProcess; // Restore the original process object
28
28
  });
29
29
 
30
- it('correctly reflects boolean value for USE_AZURE_OPENAI', () => {
31
- process.env.USE_AZURE_OPENAI = '1';
32
- const config = getServerConfig();
33
- expect(config.USE_AZURE_OPENAI).toBe(true);
34
- });
35
-
36
- it('correctly handles falsy values for USE_AZURE_OPENAI', () => {
37
- process.env.USE_AZURE_OPENAI = '0';
38
- const config = getServerConfig();
39
- expect(config.USE_AZURE_OPENAI).toBe(false);
40
- });
41
-
42
30
  it('correctly handles values for OPENAI_FUNCTION_REGIONS', () => {
43
31
  process.env.OPENAI_FUNCTION_REGIONS = 'iad1,sfo1';
44
32
  const config = getServerConfig();
@@ -18,7 +18,6 @@ declare global {
18
18
  AZURE_API_KEY?: string;
19
19
  AZURE_ENDPOINT?: string;
20
20
  AZURE_API_VERSION?: string;
21
- USE_AZURE_OPENAI?: string;
22
21
 
23
22
  // ZhiPu Provider
24
23
  ENABLED_ZHIPU?: string;
@@ -97,6 +96,8 @@ export const getProviderConfig = () => {
97
96
  throw new Error('[Server Config] you are importing a server-only module outside of server');
98
97
  }
99
98
 
99
+ const AZURE_API_KEY = process.env.AZURE_API_KEY || '';
100
+
100
101
  const ZHIPU_API_KEY = process.env.ZHIPU_API_KEY || '';
101
102
  const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID || '';
102
103
 
@@ -118,6 +119,8 @@ export const getProviderConfig = () => {
118
119
 
119
120
  const TOGETHERAI_API_KEY = process.env.TOGETHERAI_API_KEY || '';
120
121
 
122
+ const OLLAMA_PROXY_URL = process.env.OLLAMA_PROXY_URL || '';
123
+
121
124
  // region format: iad1,sfo1
122
125
  let regions: string[] = [];
123
126
  if (process.env.OPENAI_FUNCTION_REGIONS) {
@@ -150,6 +153,12 @@ export const getProviderConfig = () => {
150
153
  OPENAI_MODEL_LIST: process.env.OPENAI_MODEL_LIST || process.env.CUSTOM_MODELS,
151
154
  OPENAI_FUNCTION_REGIONS: regions,
152
155
 
156
+ ENABLED_AZURE_OPENAI: !!AZURE_API_KEY,
157
+ AZURE_API_KEY,
158
+ AZURE_API_VERSION: process.env.AZURE_API_VERSION,
159
+ AZURE_ENDPOINT: process.env.AZURE_ENDPOINT,
160
+ AZURE_MODEL_LIST: process.env.AZURE_MODEL_LIST,
161
+
153
162
  ENABLED_ZHIPU: !!ZHIPU_API_KEY,
154
163
  ZHIPU_API_KEY,
155
164
 
@@ -191,13 +200,8 @@ export const getProviderConfig = () => {
191
200
  AWS_ACCESS_KEY_ID: AWS_ACCESS_KEY_ID,
192
201
  AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY || '',
193
202
 
194
- AZURE_API_KEY: process.env.AZURE_API_KEY,
195
- AZURE_API_VERSION: process.env.AZURE_API_VERSION,
196
- AZURE_ENDPOINT: process.env.AZURE_ENDPOINT,
197
- USE_AZURE_OPENAI: process.env.USE_AZURE_OPENAI === '1',
198
-
199
- ENABLE_OLLAMA: !!process.env.OLLAMA_PROXY_URL,
200
- OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
203
+ ENABLE_OLLAMA: !!OLLAMA_PROXY_URL,
204
+ OLLAMA_PROXY_URL: OLLAMA_PROXY_URL,
201
205
  OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST || process.env.OLLAMA_CUSTOM_MODELS,
202
206
  };
203
207
  };
@@ -5,7 +5,7 @@ import { memo, useMemo } from 'react';
5
5
 
6
6
  import { ModelItemRender, ProviderItemRender } from '@/components/ModelSelect';
7
7
  import { useGlobalStore } from '@/store/global';
8
- import { modelConfigSelectors } from '@/store/global/selectors';
8
+ import { modelProviderSelectors } from '@/store/global/selectors';
9
9
  import { ModelProviderCard } from '@/types/llm';
10
10
 
11
11
  import { useStore } from '../store';
@@ -25,7 +25,10 @@ interface ModelOption {
25
25
 
26
26
  const ModelSelect = memo(() => {
27
27
  const [model, updateConfig] = useStore((s) => [s.config.model, s.setAgentConfig]);
28
- const enabledList = useGlobalStore(modelConfigSelectors.providerListForModelSelect, isEqual);
28
+ const enabledList = useGlobalStore(
29
+ modelProviderSelectors.modelProviderListForModelSelect,
30
+ isEqual,
31
+ );
29
32
  const { styles } = useStyles();
30
33
 
31
34
  const options = useMemo<SelectProps['options']>(() => {
@@ -14,7 +14,7 @@ const Tokens = memo(() => {
14
14
  const [systemRole, model] = useStore((s) => [s.config.systemRole, s.config.model]);
15
15
  const systemTokenCount = useTokenCount(systemRole);
16
16
 
17
- const showTag = useGlobalStore(modelProviderSelectors.modelHasMaxToken(model));
17
+ const showTag = useGlobalStore(modelProviderSelectors.isModelHasMaxToken(model));
18
18
  const modelMaxTokens = useGlobalStore(modelProviderSelectors.modelMaxToken(model));
19
19
 
20
20
  return (
@@ -20,8 +20,8 @@ const FileUpload = memo(() => {
20
20
 
21
21
  const model = useSessionStore(agentSelectors.currentAgentModel);
22
22
  const [canUpload, enabledFiles] = useGlobalStore((s) => [
23
- modelProviderSelectors.modelEnabledUpload(model)(s),
24
- modelProviderSelectors.modelEnabledFiles(model)(s),
23
+ modelProviderSelectors.isModelEnabledUpload(model)(s),
24
+ modelProviderSelectors.isModelEnabledFiles(model)(s),
25
25
  ]);
26
26
 
27
27
  return (
@@ -32,7 +32,7 @@ const Token = memo(() => {
32
32
  const maxTokens = useGlobalStore(modelProviderSelectors.modelMaxToken(model));
33
33
 
34
34
  // Tool usage token
35
- const canUseTool = useGlobalStore(modelProviderSelectors.modelEnabledFunctionCall(model));
35
+ const canUseTool = useGlobalStore(modelProviderSelectors.isModelEnabledFunctionCall(model));
36
36
  const plugins = useSessionStore(agentSelectors.currentAgentPlugins);
37
37
  const toolsString = useToolStore((s) => {
38
38
  const pluginSystemRoles = toolSelectors.enabledSystemRoles(plugins)(s);
@@ -10,7 +10,7 @@ const LargeTokenContent = dynamic(() => import('./TokenTag'), { ssr: false });
10
10
 
11
11
  const Token = memo(() => {
12
12
  const model = useSessionStore(agentSelectors.currentAgentModel);
13
- const showTag = useGlobalStore(modelProviderSelectors.modelHasMaxToken(model));
13
+ const showTag = useGlobalStore(modelProviderSelectors.isModelHasMaxToken(model));
14
14
 
15
15
  return showTag && <LargeTokenContent />;
16
16
  });