@lobehub/chat 1.29.6 → 1.31.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.env.example +5 -0
  2. package/CHANGELOG.md +50 -0
  3. package/Dockerfile +2 -0
  4. package/Dockerfile.database +2 -0
  5. package/docs/usage/features/database.zh-CN.mdx +3 -3
  6. package/locales/ar/modelProvider.json +12 -0
  7. package/locales/bg-BG/modelProvider.json +12 -0
  8. package/locales/de-DE/modelProvider.json +12 -0
  9. package/locales/en-US/modelProvider.json +12 -0
  10. package/locales/es-ES/modelProvider.json +12 -0
  11. package/locales/fr-FR/modelProvider.json +12 -0
  12. package/locales/it-IT/modelProvider.json +12 -0
  13. package/locales/ja-JP/modelProvider.json +12 -0
  14. package/locales/ko-KR/modelProvider.json +12 -0
  15. package/locales/nl-NL/modelProvider.json +12 -0
  16. package/locales/pl-PL/modelProvider.json +12 -0
  17. package/locales/pt-BR/modelProvider.json +12 -0
  18. package/locales/ru-RU/modelProvider.json +12 -0
  19. package/locales/tr-TR/modelProvider.json +12 -0
  20. package/locales/vi-VN/modelProvider.json +12 -0
  21. package/locales/zh-CN/modelProvider.json +12 -0
  22. package/locales/zh-TW/modelProvider.json +12 -0
  23. package/package.json +2 -2
  24. package/src/app/(main)/settings/llm/ProviderList/Cloudflare/index.tsx +43 -0
  25. package/src/app/(main)/settings/llm/ProviderList/providers.tsx +6 -0
  26. package/src/config/llm.ts +17 -0
  27. package/src/config/modelProviders/cloudflare.ts +89 -0
  28. package/src/config/modelProviders/index.ts +8 -0
  29. package/src/config/modelProviders/xai.ts +29 -0
  30. package/src/const/auth.ts +2 -0
  31. package/src/const/settings/llm.ts +10 -0
  32. package/src/libs/agent-runtime/AgentRuntime.ts +14 -1
  33. package/src/libs/agent-runtime/cloudflare/index.test.ts +648 -0
  34. package/src/libs/agent-runtime/cloudflare/index.ts +123 -0
  35. package/src/libs/agent-runtime/types/type.ts +2 -0
  36. package/src/libs/agent-runtime/utils/cloudflareHelpers.test.ts +339 -0
  37. package/src/libs/agent-runtime/utils/cloudflareHelpers.ts +134 -0
  38. package/src/libs/agent-runtime/xai/index.test.ts +255 -0
  39. package/src/libs/agent-runtime/xai/index.ts +10 -0
  40. package/src/locales/default/modelProvider.ts +13 -1
  41. package/src/server/globalConfig/index.ts +16 -0
  42. package/src/server/modules/AgentRuntime/index.ts +18 -0
  43. package/src/services/_auth.ts +9 -0
  44. package/src/services/chat.ts +7 -0
  45. package/src/store/user/slices/modelList/selectors/keyVaults.ts +2 -0
  46. package/src/store/user/slices/modelList/selectors/modelConfig.ts +2 -0
  47. package/src/types/user/settings/keyVaults.ts +7 -0
@@ -0,0 +1,89 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref https://developers.cloudflare.com/workers-ai/models/#text-generation
4
+ // api https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility
5
+ const Cloudflare: ModelProviderCard = {
6
+ chatModels: [
7
+ {
8
+ displayName: 'deepseek-coder-6.7b-instruct-awq',
9
+ enabled: true,
10
+ id: '@hf/thebloke/deepseek-coder-6.7b-instruct-awq',
11
+ tokens: 16_384,
12
+ },
13
+ {
14
+ displayName: 'gemma-7b-it',
15
+ enabled: true,
16
+ id: '@hf/google/gemma-7b-it',
17
+ tokens: 2048,
18
+ },
19
+ {
20
+ displayName: 'hermes-2-pro-mistral-7b',
21
+ enabled: true,
22
+ // functionCall: true,
23
+ id: '@hf/nousresearch/hermes-2-pro-mistral-7b',
24
+ tokens: 4096,
25
+ },
26
+ {
27
+ displayName: 'llama-3-8b-instruct-awq',
28
+ id: '@cf/meta/llama-3-8b-instruct-awq',
29
+ tokens: 8192,
30
+ },
31
+ {
32
+ displayName: 'mistral-7b-instruct-v0.2',
33
+ id: '@hf/mistral/mistral-7b-instruct-v0.2',
34
+ tokens: 4096,
35
+ },
36
+ {
37
+ displayName: 'neural-chat-7b-v3-1-awq',
38
+ enabled: true,
39
+ id: '@hf/thebloke/neural-chat-7b-v3-1-awq',
40
+ tokens: 32_768,
41
+ },
42
+ {
43
+ displayName: 'openchat-3.5-0106',
44
+ id: '@cf/openchat/openchat-3.5-0106',
45
+ tokens: 8192,
46
+ },
47
+ {
48
+ displayName: 'openhermes-2.5-mistral-7b-awq',
49
+ enabled: true,
50
+ id: '@hf/thebloke/openhermes-2.5-mistral-7b-awq',
51
+ tokens: 32_768,
52
+ },
53
+ {
54
+ displayName: 'qwen1.5-14b-chat-awq',
55
+ enabled: true,
56
+ id: '@cf/qwen/qwen1.5-14b-chat-awq',
57
+ tokens: 32_768,
58
+ },
59
+ {
60
+ displayName: 'starling-lm-7b-beta',
61
+ enabled: true,
62
+ id: '@hf/nexusflow/starling-lm-7b-beta',
63
+ tokens: 4096,
64
+ },
65
+ {
66
+ displayName: 'zephyr-7b-beta-awq',
67
+ enabled: true,
68
+ id: '@hf/thebloke/zephyr-7b-beta-awq',
69
+ tokens: 32_768,
70
+ },
71
+ {
72
+ description:
73
+ 'Generation over generation, Meta Llama 3 demonstrates state-of-the-art performance on a wide range of industry benchmarks and offers new capabilities, including improved reasoning.\t',
74
+ displayName: 'meta-llama-3-8b-instruct',
75
+ enabled: true,
76
+ functionCall: false,
77
+ id: '@hf/meta-llama/meta-llama-3-8b-instruct',
78
+ },
79
+ ],
80
+ checkModel: '@hf/meta-llama/meta-llama-3-8b-instruct',
81
+ id: 'cloudflare',
82
+ modelList: {
83
+ showModelFetcher: true,
84
+ },
85
+ name: 'Cloudflare Workers AI',
86
+ url: 'https://developers.cloudflare.com/workers-ai/models',
87
+ };
88
+
89
+ export default Cloudflare;
@@ -6,6 +6,7 @@ import AnthropicProvider from './anthropic';
6
6
  import AzureProvider from './azure';
7
7
  import BaichuanProvider from './baichuan';
8
8
  import BedrockProvider from './bedrock';
9
+ import CloudflareProvider from './cloudflare';
9
10
  import DeepSeekProvider from './deepseek';
10
11
  import FireworksAIProvider from './fireworksai';
11
12
  import GithubProvider from './github';
@@ -30,6 +31,7 @@ import TaichuProvider from './taichu';
30
31
  import TogetherAIProvider from './togetherai';
31
32
  import UpstageProvider from './upstage';
32
33
  import WenxinProvider from './wenxin';
34
+ import XAIProvider from './xai';
33
35
  import ZeroOneProvider from './zeroone';
34
36
  import ZhiPuProvider from './zhipu';
35
37
 
@@ -52,11 +54,13 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
52
54
  PerplexityProvider.chatModels,
53
55
  AnthropicProvider.chatModels,
54
56
  HuggingFaceProvider.chatModels,
57
+ XAIProvider.chatModels,
55
58
  ZeroOneProvider.chatModels,
56
59
  StepfunProvider.chatModels,
57
60
  NovitaProvider.chatModels,
58
61
  BaichuanProvider.chatModels,
59
62
  TaichuProvider.chatModels,
63
+ CloudflareProvider.chatModels,
60
64
  Ai360Provider.chatModels,
61
65
  SiliconCloudProvider.chatModels,
62
66
  UpstageProvider.chatModels,
@@ -86,6 +90,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
86
90
  MistralProvider,
87
91
  Ai21Provider,
88
92
  UpstageProvider,
93
+ XAIProvider,
89
94
  QwenProvider,
90
95
  WenxinProvider,
91
96
  HunyuanProvider,
@@ -99,6 +104,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
99
104
  MinimaxProvider,
100
105
  Ai360Provider,
101
106
  TaichuProvider,
107
+ CloudflareProvider,
102
108
  SiliconCloudProvider,
103
109
  ];
104
110
 
@@ -117,6 +123,7 @@ export { default as AnthropicProviderCard } from './anthropic';
117
123
  export { default as AzureProviderCard } from './azure';
118
124
  export { default as BaichuanProviderCard } from './baichuan';
119
125
  export { default as BedrockProviderCard } from './bedrock';
126
+ export { default as CloudflareProviderCard } from './cloudflare';
120
127
  export { default as DeepSeekProviderCard } from './deepseek';
121
128
  export { default as FireworksAIProviderCard } from './fireworksai';
122
129
  export { default as GithubProviderCard } from './github';
@@ -141,5 +148,6 @@ export { default as TaichuProviderCard } from './taichu';
141
148
  export { default as TogetherAIProviderCard } from './togetherai';
142
149
  export { default as UpstageProviderCard } from './upstage';
143
150
  export { default as WenxinProviderCard } from './wenxin';
151
+ export { default as XAIProviderCard } from './xai';
144
152
  export { default as ZeroOneProviderCard } from './zeroone';
145
153
  export { default as ZhiPuProviderCard } from './zhipu';
@@ -0,0 +1,29 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ // ref: https://x.ai/about
4
+ const XAI: ModelProviderCard = {
5
+ chatModels: [
6
+ {
7
+ description: '拥有与 Grok 2 相当的性能,但具有更高的效率、速度和功能。',
8
+ displayName: 'Grok Beta',
9
+ enabled: true,
10
+ functionCall: true,
11
+ id: 'grok-beta',
12
+ pricing: {
13
+ input: 5,
14
+ output: 15,
15
+ },
16
+ tokens: 131_072,
17
+ },
18
+ ],
19
+ checkModel: 'grok-beta',
20
+ description:
21
+ 'xAI 是一家致力于构建人工智能以加速人类科学发现的公司。我们的使命是推动我们对宇宙的共同理解。',
22
+ id: 'xai',
23
+ modelList: { showModelFetcher: true },
24
+ modelsUrl: 'https://docs.x.ai/docs#models',
25
+ name: 'xAI',
26
+ url: 'https://console.x.ai',
27
+ };
28
+
29
+ export default XAI;
package/src/const/auth.ts CHANGED
@@ -37,6 +37,8 @@ export interface JWTPayload {
37
37
  awsSecretAccessKey?: string;
38
38
  awsSessionToken?: string;
39
39
 
40
+ cloudflareBaseURLOrAccountID?: string;
41
+
40
42
  wenxinAccessKey?: string;
41
43
  wenxinSecretKey?: string;
42
44
 
@@ -4,6 +4,7 @@ import {
4
4
  AnthropicProviderCard,
5
5
  BaichuanProviderCard,
6
6
  BedrockProviderCard,
7
+ CloudflareProviderCard,
7
8
  DeepSeekProviderCard,
8
9
  FireworksAIProviderCard,
9
10
  GithubProviderCard,
@@ -28,6 +29,7 @@ import {
28
29
  TogetherAIProviderCard,
29
30
  UpstageProviderCard,
30
31
  WenxinProviderCard,
32
+ XAIProviderCard,
31
33
  ZeroOneProviderCard,
32
34
  ZhiPuProviderCard,
33
35
  filterEnabledModels,
@@ -59,6 +61,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
59
61
  enabled: false,
60
62
  enabledModels: filterEnabledModels(BedrockProviderCard),
61
63
  },
64
+ cloudflare: {
65
+ enabled: false,
66
+ enabledModels: filterEnabledModels(CloudflareProviderCard),
67
+ },
62
68
  deepseek: {
63
69
  enabled: false,
64
70
  enabledModels: filterEnabledModels(DeepSeekProviderCard),
@@ -156,6 +162,10 @@ export const DEFAULT_LLM_CONFIG: UserModelProviderConfig = {
156
162
  enabled: false,
157
163
  enabledModels: filterEnabledModels(WenxinProviderCard),
158
164
  },
165
+ xai: {
166
+ enabled: false,
167
+ enabledModels: filterEnabledModels(XAIProviderCard),
168
+ },
159
169
  zeroone: {
160
170
  enabled: false,
161
171
  enabledModels: filterEnabledModels(ZeroOneProviderCard),
@@ -9,6 +9,7 @@ import { LobeAnthropicAI } from './anthropic';
9
9
  import { LobeAzureOpenAI } from './azureOpenai';
10
10
  import { LobeBaichuanAI } from './baichuan';
11
11
  import { LobeBedrockAI, LobeBedrockAIParams } from './bedrock';
12
+ import { LobeCloudflareAI, LobeCloudflareParams } from './cloudflare';
12
13
  import { LobeDeepSeekAI } from './deepseek';
13
14
  import { LobeFireworksAI } from './fireworksai';
14
15
  import { LobeGithubAI } from './github';
@@ -41,6 +42,7 @@ import {
41
42
  TextToSpeechPayload,
42
43
  } from './types';
43
44
  import { LobeUpstageAI } from './upstage';
45
+ import { LobeXAI } from './xai';
44
46
  import { LobeZeroOneAI } from './zeroone';
45
47
  import { LobeZhipuAI } from './zhipu';
46
48
 
@@ -131,6 +133,7 @@ class AgentRuntime {
131
133
  azure: { apiVersion?: string; apikey?: string; endpoint?: string };
132
134
  baichuan: Partial<ClientOptions>;
133
135
  bedrock: Partial<LobeBedrockAIParams>;
136
+ cloudflare: Partial<LobeCloudflareParams>;
134
137
  deepseek: Partial<ClientOptions>;
135
138
  fireworksai: Partial<ClientOptions>;
136
139
  github: Partial<ClientOptions>;
@@ -154,6 +157,7 @@ class AgentRuntime {
154
157
  taichu: Partial<ClientOptions>;
155
158
  togetherai: Partial<ClientOptions>;
156
159
  upstage: Partial<ClientOptions>;
160
+ xai: Partial<ClientOptions>;
157
161
  zeroone: Partial<ClientOptions>;
158
162
  zhipu: Partial<ClientOptions>;
159
163
  }>,
@@ -321,8 +325,17 @@ class AgentRuntime {
321
325
  runtimeModel = await LobeSenseNovaAI.fromAPIKey(params.sensenova);
322
326
  break;
323
327
  }
324
- }
325
328
 
329
+ case ModelProvider.XAI: {
330
+ runtimeModel = new LobeXAI(params.xai);
331
+ break;
332
+ }
333
+
334
+ case ModelProvider.Cloudflare: {
335
+ runtimeModel = new LobeCloudflareAI(params.cloudflare ?? {});
336
+ break;
337
+ }
338
+ }
326
339
  return new AgentRuntime(runtimeModel);
327
340
  }
328
341
  }