@lobehub/chat 1.47.12 → 1.47.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (43)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +2 -2
  4. package/src/config/aiModels/ai360.ts +23 -27
  5. package/src/config/aiModels/giteeai.ts +61 -31
  6. package/src/config/aiModels/minimax.ts +19 -0
  7. package/src/config/aiModels/moonshot.ts +73 -1
  8. package/src/config/aiModels/taichu.ts +22 -1
  9. package/src/config/aiModels/upstage.ts +1 -1
  10. package/src/config/aiModels/zhipu.ts +13 -0
  11. package/src/config/modelProviders/ai21.ts +0 -2
  12. package/src/config/modelProviders/ai360.ts +22 -26
  13. package/src/config/modelProviders/anthropic.ts +1 -0
  14. package/src/config/modelProviders/giteeai.ts +58 -32
  15. package/src/config/modelProviders/google.ts +2 -0
  16. package/src/config/modelProviders/hunyuan.ts +0 -2
  17. package/src/config/modelProviders/mistral.ts +2 -0
  18. package/src/config/modelProviders/moonshot.ts +2 -0
  19. package/src/config/modelProviders/spark.ts +0 -2
  20. package/src/config/modelProviders/taichu.ts +15 -10
  21. package/src/config/modelProviders/upstage.ts +1 -3
  22. package/src/config/modelProviders/zeroone.ts +5 -1
  23. package/src/config/modelProviders/zhipu.ts +17 -1
  24. package/src/libs/agent-runtime/ai360/index.ts +24 -0
  25. package/src/libs/agent-runtime/anthropic/index.ts +36 -0
  26. package/src/libs/agent-runtime/baichuan/index.ts +28 -0
  27. package/src/libs/agent-runtime/deepseek/index.ts +17 -0
  28. package/src/libs/agent-runtime/fireworksai/index.ts +22 -0
  29. package/src/libs/agent-runtime/giteeai/index.ts +28 -0
  30. package/src/libs/agent-runtime/google/index.ts +40 -1
  31. package/src/libs/agent-runtime/groq/index.ts +29 -0
  32. package/src/libs/agent-runtime/huggingface/index.ts +37 -0
  33. package/src/libs/agent-runtime/internlm/index.ts +17 -0
  34. package/src/libs/agent-runtime/minimax/index.ts +2 -2
  35. package/src/libs/agent-runtime/mistral/index.ts +26 -0
  36. package/src/libs/agent-runtime/moonshot/index.ts +18 -0
  37. package/src/libs/agent-runtime/qwen/index.ts +25 -0
  38. package/src/libs/agent-runtime/sensenova/index.ts +28 -0
  39. package/src/libs/agent-runtime/siliconcloud/index.ts +34 -0
  40. package/src/libs/agent-runtime/stepfun/index.ts +25 -0
  41. package/src/libs/agent-runtime/xai/index.ts +18 -0
  42. package/src/libs/agent-runtime/zeroone/index.ts +17 -0
  43. package/src/libs/agent-runtime/zhipu/index.ts +35 -0
package/src/libs/agent-runtime/google/index.ts

@@ -27,6 +27,16 @@ import { StreamingResponse } from '../utils/response';
 import { GoogleGenerativeAIStream, convertIterableToStream } from '../utils/streams';
 import { parseDataUri } from '../utils/uriParser';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
+
+export interface GoogleModelCard {
+  displayName: string;
+  inputTokenLimit: number;
+  name: string;
+  outputTokenLimit: number;
+}
+
 enum HarmCategory {
   HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',
   HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',
@@ -46,15 +56,19 @@ function getThreshold(model: string): HarmBlockThreshold {
   return HarmBlockThreshold.BLOCK_NONE;
 }
 
+const DEFAULT_BASE_URL = 'https://generativelanguage.googleapis.com';
+
 export class LobeGoogleAI implements LobeRuntimeAI {
   private client: GoogleGenerativeAI;
   baseURL?: string;
+  apiKey?: string;
 
   constructor({ apiKey, baseURL }: { apiKey?: string; baseURL?: string } = {}) {
     if (!apiKey) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidProviderAPIKey);
 
     this.client = new GoogleGenerativeAI(apiKey);
-    this.baseURL = baseURL;
+    this.baseURL = baseURL || DEFAULT_BASE_URL;
+    this.apiKey = apiKey;
   }
 
   async chat(rawPayload: ChatStreamPayload, options?: ChatCompetitionOptions) {
@@ -123,6 +137,31 @@ export class LobeGoogleAI implements LobeRuntimeAI {
     }
   }
 
+  async models() {
+    const url = `${this.baseURL}/v1beta/models?key=${this.apiKey}`;
+    const response = await fetch(url, {
+      method: 'GET',
+    });
+    const json = await response.json();
+
+    const modelList: GoogleModelCard[] = json['models'];
+
+    return modelList
+      .map((model) => {
+        const modelName = model.name.replace(/^models\//, '');
+
+        return {
+          contextWindowTokens: model.inputTokenLimit + model.outputTokenLimit,
+          displayName: model.displayName,
+          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => modelName.endsWith(m.id))?.enabled || false,
+          functionCall: modelName.toLowerCase().includes('gemini'),
+          id: modelName,
+          vision: modelName.toLowerCase().includes('vision') || modelName.toLowerCase().includes('gemini') && !modelName.toLowerCase().includes('gemini-1.0'),
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  }
+
   private buildPayload(payload: ChatStreamPayload) {
     const system_message = payload.messages.find((m) => m.role === 'system');
     const user_messages = payload.messages.filter((m) => m.role !== 'system');
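The new models() method above talks to the Gemini REST API directly via fetch, then maps each entry onto LobeChat's model-card shape. A minimal walk-through of that transform, using a hypothetical response entry (the values below are illustrative, not taken from this diff):

// Hypothetical entry from GET {baseURL}/v1beta/models (illustrative values).
const raw: GoogleModelCard = {
  displayName: 'Gemini 1.5 Flash',
  inputTokenLimit: 1_000_000,
  name: 'models/gemini-1.5-flash',
  outputTokenLimit: 8192,
};

// The same steps models() applies:
const modelName = raw.name.replace(/^models\//, ''); // 'gemini-1.5-flash'
const card = {
  contextWindowTokens: raw.inputTokenLimit + raw.outputTokenLimit, // 1_008_192
  displayName: raw.displayName,
  functionCall: true, // modelName contains 'gemini'
  id: modelName,
  vision: true, // contains 'gemini' and is not a 'gemini-1.0' model
};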
package/src/libs/agent-runtime/groq/index.ts

@@ -2,6 +2,13 @@ import { AgentRuntimeErrorType } from '../error';
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface GroqModelCard {
+  context_window: number;
+  id: string;
+}
+
 export const LobeGroq = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.groq.com/openai/v1',
   chatCompletion: {
@@ -24,5 +31,27 @@ export const LobeGroq = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_GROQ_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const functionCallKeywords = [
+        'tool',
+        'llama-3.3',
+        'llama-3.1',
+        'llama3-',
+        'mixtral-8x7b-32768',
+        'gemma2-9b-it',
+      ];
+
+      const model = m as unknown as GroqModelCard;
+
+      return {
+        contextWindowTokens: model.context_window,
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+        id: model.id,
+        vision: model.id.toLowerCase().includes('vision'),
+      };
+    },
+  },
   provider: ModelProvider.Groq,
 });
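The OpenAI-compatible providers below all follow the pattern shown here: transformModel receives each raw entry from the provider's /models endpoint, capability flags are inferred from substrings of the model id, and enabled is looked up by checking whether the id ends with some entry of LOBE_DEFAULT_MODEL_LIST (note that the callback's m shadows transformModel's m; the comparison is against each default-list entry's id). A small illustration of the matching, with a hypothetical model id:

// Illustrative only; the model id below is hypothetical.
const functionCallKeywords = ['tool', 'llama-3.3', 'llama-3.1', 'llama3-', 'mixtral-8x7b-32768', 'gemma2-9b-it'];

const id = 'llama-3.3-70b-versatile';
const functionCall = functionCallKeywords.some((keyword) => id.toLowerCase().includes(keyword)); // true, matches 'llama-3.3'
const vision = id.toLowerCase().includes('vision'); // false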
package/src/libs/agent-runtime/huggingface/index.ts

@@ -6,6 +6,14 @@ import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 import { convertIterableToStream } from '../utils/streams';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
+
+export interface HuggingFaceModelCard {
+  id: string;
+  tags: string[];
+}
+
 export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
   chatCompletion: {
     handleStreamBizErrorType: (error) => {
@@ -47,5 +55,34 @@ export const LobeHuggingFaceAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_HUGGINGFACE_CHAT_COMPLETION === '1',
   },
+  models: async () => {
+    const visionKeywords = [
+      'image-text-to-text',
+      'multimodal',
+      'vision',
+    ];
+
+    // ref: https://huggingface.co/docs/hub/api
+    const url = 'https://huggingface.co/api/models';
+    const response = await fetch(url, {
+      method: 'GET',
+    });
+    const json = await response.json();
+
+    const modelList: HuggingFaceModelCard[] = json;
+
+    return modelList
+      .map((model) => {
+        return {
+          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+          functionCall: model.tags.some(tag => tag.toLowerCase().includes('function-calling')),
+          id: model.id,
+          vision: model.tags.some(tag =>
+            visionKeywords.some(keyword => tag.toLowerCase().includes(keyword))
+          ),
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.HuggingFace,
 });
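Unlike the providers above, the HuggingFace runtime lists models from the public Hub API (no auth required) and infers capabilities from each repository's tags array rather than from the id. A sketch of the tag matching, using a hypothetical entry:

// Hypothetical Hub entry; real ones come from https://huggingface.co/api/models.
const entry: HuggingFaceModelCard = {
  id: 'meta-llama/Llama-3.2-11B-Vision-Instruct',
  tags: ['image-text-to-text', 'conversational'],
};

const visionKeywords = ['image-text-to-text', 'multimodal', 'vision'];
const vision = entry.tags.some((tag) =>
  visionKeywords.some((keyword) => tag.toLowerCase().includes(keyword)),
); // true, via 'image-text-to-text'
const functionCall = entry.tags.some((tag) => tag.toLowerCase().includes('function-calling')); // false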
package/src/libs/agent-runtime/internlm/index.ts

@@ -1,6 +1,12 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface InternLMModelCard {
+  id: string;
+}
+
 export const LobeInternLMAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://internlm-chat.intern-ai.org.cn/puyu/api/v1',
   chatCompletion: {
@@ -14,5 +20,16 @@ export const LobeInternLMAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_INTERNLM_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const model = m as unknown as InternLMModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: true,
+        id: model.id,
+      };
+    },
+  },
   provider: ModelProvider.InternLM,
 });
package/src/libs/agent-runtime/minimax/index.ts

@@ -1,10 +1,10 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
-import Minimax from '@/config/modelProviders/minimax';
+import minimaxChatModels from '@/config/aiModels/minimax';
 
 export const getMinimaxMaxOutputs = (modelId: string): number | undefined => {
-  const model = Minimax.chatModels.find(model => model.id === modelId);
+  const model = minimaxChatModels.find(model => model.id === modelId);
   return model ? model.maxOutput : undefined;
 };
 
package/src/libs/agent-runtime/mistral/index.ts

@@ -1,6 +1,18 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface MistralModelCard {
+  capabilities: {
+    function_calling: boolean;
+    vision: boolean;
+  };
+  description: string;
+  id: string;
+  max_context_length: number;
+}
+
 export const LobeMistralAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.mistral.ai/v1',
   chatCompletion: {
@@ -18,5 +30,19 @@ export const LobeMistralAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const model = m as unknown as MistralModelCard;
+
+      return {
+        contextWindowTokens: model.max_context_length,
+        description: model.description,
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: model.capabilities.function_calling,
+        id: model.id,
+        vision: model.capabilities.vision,
+      };
+    },
+  },
   provider: ModelProvider.Mistral,
 });
package/src/libs/agent-runtime/moonshot/index.ts

@@ -3,6 +3,12 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface MoonshotModelCard {
+  id: string;
+}
+
 export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.moonshot.cn/v1',
   chatCompletion: {
@@ -18,5 +24,17 @@ export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_MOONSHOT_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const model = m as unknown as MoonshotModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: true,
+        id: model.id,
+        vision: model.id.toLowerCase().includes('vision'),
+      };
+    },
+  },
   provider: ModelProvider.Moonshot,
 });
package/src/libs/agent-runtime/qwen/index.ts

@@ -3,6 +3,12 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 import { QwenAIStream } from '../utils/streams';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface QwenModelCard {
+  id: string;
+}
+
 /*
   QwenEnableSearchModelSeries: An array of Qwen model series that support the enable_search parameter.
   Currently, enable_search is only supported on Qwen commercial series, excluding Qwen-VL and Qwen-Long series.
@@ -60,6 +66,25 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
   },
   debug: {
     chatCompletion: () => process.env.DEBUG_QWEN_CHAT_COMPLETION === '1',
+  },
+  models: {
+    transformModel: (m) => {
+      const functionCallKeywords = [
+        'qwen-max',
+        'qwen-plus',
+        'qwen-turbo',
+        'qwen2.5',
+      ];
+
+      const model = m as unknown as QwenModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+        id: model.id,
+        vision: model.id.toLowerCase().includes('vl'),
+      };
+    },
   },
   provider: ModelProvider.Qwen,
 });
package/src/libs/agent-runtime/sensenova/index.ts

@@ -1,6 +1,13 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
+
+export interface SenseNovaModelCard {
+  id: string;
+}
+
 export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
   chatCompletion: {
@@ -25,5 +32,26 @@ export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_SENSENOVA_CHAT_COMPLETION === '1',
   },
+  models: async ({ client }) => {
+    const functionCallKeywords = [
+      'sensechat-5',
+    ];
+
+    client.baseURL = 'https://api.sensenova.cn/v1/llm';
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: SenseNovaModelCard[] = modelsPage.data;
+
+    return modelList
+      .map((model) => {
+        return {
+          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+          functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+          id: model.id,
+          vision: model.id.toLowerCase().includes('vision'),
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.SenseNova,
 });
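SenseNova cannot use the factory's default listing because its model list lives under a different base path than its OpenAI-compatible chat endpoint, so the async models({ client }) form repoints the SDK client before listing. Outside the factory the same trick looks roughly like this (a sketch, assuming client is the OpenAI SDK instance the factory constructs):

import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.SENSENOVA_API_KEY,
  baseURL: 'https://api.sensenova.cn/compatible-mode/v1', // chat endpoint
});

// Repoint at the listing endpoint, then reuse the SDK's models API.
client.baseURL = 'https://api.sensenova.cn/v1/llm';
const page = (await client.models.list()) as any;
const cards: SenseNovaModelCard[] = page.data;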
package/src/libs/agent-runtime/siliconcloud/index.ts

@@ -1,6 +1,12 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface SiliconCloudModelCard {
+  id: string;
+}
+
 export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.siliconflow.cn/v1',
   chatCompletion: {
@@ -14,5 +20,33 @@ export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_SILICONCLOUD_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const functionCallKeywords = [
+        'qwen/qwen2.5',
+        'thudm/glm-4',
+        'deepseek-ai/deepSeek',
+        'internlm/internlm2_5',
+        'meta-llama/meta-llama-3.1',
+        'meta-llama/meta-llama-3.3',
+      ];
+
+      const visionKeywords = [
+        'opengvlab/internvl',
+        'qwen/qwen2-vl',
+        'teleai/telemm',
+        'deepseek-ai/deepseek-vl',
+      ];
+
+      const model = m as unknown as SiliconCloudModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+        id: model.id,
+        vision: visionKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+      };
+    },
+  },
   provider: ModelProvider.SiliconCloud,
 });
package/src/libs/agent-runtime/stepfun/index.ts

@@ -1,6 +1,12 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface StepfunModelCard {
+  id: string;
+}
+
 export const LobeStepfunAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.stepfun.com/v1',
   chatCompletion: {
@@ -14,5 +20,24 @@ export const LobeStepfunAI = LobeOpenAICompatibleFactory({
   debug: {
     chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      // ref: https://platform.stepfun.com/docs/llm/modeloverview
+      const functionCallKeywords = [
+        'step-1-',
+        'step-2-',
+        'step-1v-',
+      ];
+
+      const model = m as unknown as StepfunModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: functionCallKeywords.some(keyword => model.id.toLowerCase().includes(keyword)),
+        id: model.id,
+        vision: model.id.toLowerCase().includes('v'),
+      };
+    },
+  },
   provider: ModelProvider.Stepfun,
 });
package/src/libs/agent-runtime/xai/index.ts

@@ -1,10 +1,28 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface XAIModelCard {
+  id: string;
+}
+
 export const LobeXAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.x.ai/v1',
   debug: {
     chatCompletion: () => process.env.DEBUG_XAI_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const model = m as unknown as XAIModelCard;
+
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: true,
+        id: model.id,
+        vision: model.id.toLowerCase().includes('vision'),
+      };
+    },
+  },
   provider: ModelProvider.XAI,
 });
package/src/libs/agent-runtime/zeroone/index.ts

@@ -1,11 +1,28 @@
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+
+export interface ZeroOneModelCard {
+  id: string;
+}
+
 export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.lingyiwanwu.com/v1',
   debug: {
     chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
   },
+  models: {
+    transformModel: (m) => {
+      const model = m as unknown as ZeroOneModelCard;
 
+      return {
+        enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.id.endsWith(m.id))?.enabled || false,
+        functionCall: model.id.toLowerCase().includes('fc'),
+        id: model.id,
+        vision: model.id.toLowerCase().includes('vision'),
+      };
+    },
+  },
   provider: ModelProvider.ZeroOne,
 });
package/src/libs/agent-runtime/zhipu/index.ts

@@ -3,6 +3,15 @@ import OpenAI from 'openai';
 import { ChatStreamPayload, ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
+import { LOBE_DEFAULT_MODEL_LIST } from '@/config/aiModels';
+import type { ChatModelCard } from '@/types/llm';
+
+export interface ZhipuModelCard {
+  description: string;
+  modelCode: string;
+  modelName: string;
+}
+
 export const LobeZhipuAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://open.bigmodel.cn/api/paas/v4',
   chatCompletion: {
@@ -25,8 +34,34 @@ export const LobeZhipuAI = LobeOpenAICompatibleFactory({
       }),
     }) as OpenAI.ChatCompletionCreateParamsStreaming,
   },
+  constructorOptions: {
+    defaultHeaders: {
+      'Bigmodel-Organization': 'lobehub',
+      'Bigmodel-project': 'lobechat',
+    },
+  },
   debug: {
     chatCompletion: () => process.env.DEBUG_ZHIPU_CHAT_COMPLETION === '1',
   },
+  models: async ({ client }) => {
+    // ref: https://open.bigmodel.cn/console/modelcenter/square
+    client.baseURL = 'https://open.bigmodel.cn/api/fine-tuning/model_center/list?pageSize=100&pageNum=1';
+
+    const modelsPage = await client.models.list() as any;
+    const modelList: ZhipuModelCard[] = modelsPage.body.rows;
+
+    return modelList
+      .map((model) => {
+        return {
+          description: model.description,
+          displayName: model.modelName,
+          enabled: LOBE_DEFAULT_MODEL_LIST.find((m) => model.modelCode.endsWith(m.id))?.enabled || false,
+          functionCall: model.modelCode.toLowerCase().includes('glm-4') && !model.modelCode.toLowerCase().includes('glm-4v'),
+          id: model.modelCode,
+          vision: model.modelCode.toLowerCase().includes('glm-4v'),
+        };
+      })
+      .filter(Boolean) as ChatModelCard[];
+  },
   provider: ModelProvider.ZhiPu,
 });
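Zhipu goes a step further than SenseNova: its listing endpoint is not OpenAI-shaped at all, so the rows arrive under body.rows as { description, modelCode, modelName } objects, which lets the transform carry display names and descriptions through. For example, a hypothetical row would map like this (values illustrative, not taken from the diff):

// Hypothetical row from the model_center list endpoint.
const row: ZhipuModelCard = {
  description: 'example description',
  modelCode: 'glm-4-plus',
  modelName: 'GLM-4-Plus',
};

const card = {
  description: row.description,
  displayName: row.modelName,
  functionCall: true, // contains 'glm-4' and not 'glm-4v'
  id: row.modelCode,
  vision: false, // no 'glm-4v' in the model code
};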