@lobehub/chat 0.144.0 → 0.145.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97):
  1. package/.env.example +6 -0
  2. package/CHANGELOG.md +50 -0
  3. package/Dockerfile +3 -0
  4. package/docs/self-hosting/environment-variables/model-provider.mdx +9 -0
  5. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +9 -0
  6. package/locales/ar/common.json +1 -0
  7. package/locales/ar/error.json +6 -0
  8. package/locales/ar/setting.json +16 -0
  9. package/locales/bg-BG/chat.json +1 -1
  10. package/locales/bg-BG/error.json +1 -1
  11. package/locales/bg-BG/market.json +1 -1
  12. package/locales/bg-BG/migration.json +1 -1
  13. package/locales/bg-BG/plugin.json +1 -1
  14. package/locales/bg-BG/setting.json +1 -1
  15. package/locales/bg-BG/tool.json +1 -1
  16. package/locales/bg-BG/welcome.json +1 -1
  17. package/locales/de-DE/common.json +1 -0
  18. package/locales/de-DE/error.json +6 -0
  19. package/locales/de-DE/setting.json +16 -0
  20. package/locales/en-US/common.json +1 -0
  21. package/locales/en-US/error.json +6 -0
  22. package/locales/en-US/setting.json +16 -0
  23. package/locales/es-ES/common.json +1 -0
  24. package/locales/es-ES/error.json +6 -0
  25. package/locales/es-ES/setting.json +16 -0
  26. package/locales/fr-FR/common.json +1 -0
  27. package/locales/fr-FR/error.json +6 -0
  28. package/locales/fr-FR/setting.json +16 -0
  29. package/locales/it-IT/common.json +1 -0
  30. package/locales/it-IT/error.json +6 -0
  31. package/locales/it-IT/setting.json +16 -0
  32. package/locales/ja-JP/common.json +1 -0
  33. package/locales/ja-JP/error.json +6 -0
  34. package/locales/ja-JP/setting.json +16 -0
  35. package/locales/ko-KR/common.json +1 -0
  36. package/locales/ko-KR/error.json +2 -0
  37. package/locales/ko-KR/setting.json +16 -0
  38. package/locales/nl-NL/common.json +1 -0
  39. package/locales/nl-NL/error.json +6 -0
  40. package/locales/nl-NL/setting.json +16 -0
  41. package/locales/pl-PL/common.json +1 -0
  42. package/locales/pl-PL/error.json +6 -0
  43. package/locales/pl-PL/setting.json +23 -7
  44. package/locales/pt-BR/common.json +1 -0
  45. package/locales/pt-BR/error.json +6 -0
  46. package/locales/pt-BR/setting.json +16 -0
  47. package/locales/ru-RU/common.json +1 -0
  48. package/locales/ru-RU/error.json +6 -0
  49. package/locales/ru-RU/setting.json +16 -0
  50. package/locales/tr-TR/common.json +1 -0
  51. package/locales/tr-TR/error.json +6 -0
  52. package/locales/tr-TR/setting.json +23 -7
  53. package/locales/vi-VN/common.json +1 -0
  54. package/locales/vi-VN/error.json +6 -0
  55. package/locales/vi-VN/setting.json +16 -0
  56. package/locales/zh-CN/common.json +1 -0
  57. package/locales/zh-CN/error.json +6 -0
  58. package/locales/zh-CN/setting.json +16 -0
  59. package/locales/zh-TW/common.json +1 -0
  60. package/locales/zh-TW/error.json +6 -0
  61. package/locales/zh-TW/setting.json +16 -0
  62. package/package.json +1 -1
  63. package/src/app/api/chat/[provider]/agentRuntime.test.ts +26 -0
  64. package/src/app/api/chat/[provider]/agentRuntime.ts +13 -1
  65. package/src/app/api/config/route.ts +2 -0
  66. package/src/app/api/errorResponse.test.ts +5 -0
  67. package/src/app/api/errorResponse.ts +3 -0
  68. package/src/app/settings/llm/Google/index.tsx +2 -2
  69. package/src/app/settings/llm/TogetherAI/index.tsx +66 -0
  70. package/src/app/settings/llm/index.tsx +2 -0
  71. package/src/components/ModelProviderIcon/index.tsx +5 -0
  72. package/src/config/modelProviders/index.ts +3 -0
  73. package/src/config/modelProviders/ollama.ts +3 -3
  74. package/src/config/modelProviders/togetherai.ts +86 -0
  75. package/src/config/modelProviders/zeroone.ts +2 -1
  76. package/src/config/server/provider.ts +8 -0
  77. package/src/const/settings.ts +4 -0
  78. package/src/features/Conversation/Error/APIKeyForm/TogetherAI.tsx +40 -0
  79. package/src/features/Conversation/Error/APIKeyForm/index.tsx +5 -0
  80. package/src/features/Conversation/Error/index.tsx +1 -0
  81. package/src/libs/agent-runtime/error.ts +3 -0
  82. package/src/libs/agent-runtime/index.ts +1 -0
  83. package/src/libs/agent-runtime/ollama/index.test.ts +46 -1
  84. package/src/libs/agent-runtime/ollama/index.ts +40 -0
  85. package/src/libs/agent-runtime/togetherai/index.test.ts +347 -0
  86. package/src/libs/agent-runtime/togetherai/index.ts +86 -0
  87. package/src/libs/agent-runtime/types/chat.ts +7 -0
  88. package/src/libs/agent-runtime/types/type.ts +1 -0
  89. package/src/libs/agent-runtime/zeroone/index.ts +1 -1
  90. package/src/locales/default/common.ts +1 -0
  91. package/src/locales/default/error.ts +7 -0
  92. package/src/locales/default/setting.ts +17 -1
  93. package/src/services/_auth.test.ts +10 -0
  94. package/src/services/_auth.ts +4 -0
  95. package/src/store/global/slices/settings/selectors/modelProvider.test.ts +2 -2
  96. package/src/store/global/slices/settings/selectors/modelProvider.ts +26 -6
  97. package/src/types/settings/modelProvider.ts +7 -0
@@ -21,6 +21,7 @@ import {
21
21
  LobeOpenRouterAI,
22
22
  LobePerplexityAI,
23
23
  LobeRuntimeAI,
24
+ LobeTogetherAI,
24
25
  LobeZeroOneAI,
25
26
  LobeZhipuAI,
26
27
  ModelProvider,
@@ -181,6 +182,11 @@ class AgentRuntime {
181
182
  break;
182
183
  }
183
184
 
185
+ case ModelProvider.TogetherAI: {
186
+ runtimeModel = this.initTogetherAI(payload);
187
+ break;
188
+ }
189
+
184
190
  case ModelProvider.ZeroOne: {
185
191
  runtimeModel = this.initZeroOne(payload);
186
192
  break;
@@ -301,13 +307,19 @@ class AgentRuntime {
301
307
  return new LobeOpenRouterAI({ apiKey });
302
308
  }
303
309
 
310
+ private static initTogetherAI(payload: JWTPayload) {
311
+ const { TOGETHERAI_API_KEY } = getServerConfig();
312
+ const apiKey = apiKeyManager.pick(payload?.apiKey || TOGETHERAI_API_KEY);
313
+
314
+ return new LobeTogetherAI({ apiKey });
315
+ }
316
+
304
317
  private static initZeroOne(payload: JWTPayload) {
305
318
  const { ZEROONE_API_KEY } = getServerConfig();
306
319
  const apiKey = apiKeyManager.pick(payload?.apiKey || ZEROONE_API_KEY);
307
320
 
308
321
  return new LobeZeroOneAI({ apiKey });
309
322
  }
310
-
311
323
  }
312
324
 
313
325
  export default AgentRuntime;
@@ -24,6 +24,7 @@ export const GET = async () => {
24
24
  ENABLED_MISTRAL,
25
25
  ENABLED_OPENROUTER,
26
26
  ENABLED_ZEROONE,
27
+ ENABLED_TOGETHERAI,
27
28
  DEFAULT_AGENT_CONFIG,
28
29
  OLLAMA_CUSTOM_MODELS,
29
30
  OPENROUTER_CUSTOM_MODELS,
@@ -46,6 +47,7 @@ export const GET = async () => {
46
47
  ollama: { customModelName: OLLAMA_CUSTOM_MODELS, enabled: ENABLE_OLLAMA },
47
48
  openrouter: { customModelName: OPENROUTER_CUSTOM_MODELS, enabled: ENABLED_OPENROUTER },
48
49
  perplexity: { enabled: ENABLED_PERPLEXITY },
50
+ togetherai: { enabled: ENABLED_TOGETHERAI },
49
51
  zeroone: { enabled: ENABLED_ZEROONE },
50
52
  zhipu: { enabled: ENABLED_ZHIPU },
51
53
  },
@@ -121,6 +121,11 @@ describe('createErrorResponse', () => {
121
121
  expect(response.status).toBe(481);
122
122
  });
123
123
 
124
+ it('returns a 484 status for TogetherAIBizError error type', () => {
125
+ const errorType = AgentRuntimeErrorType.TogetherAIBizError;
126
+ const response = createErrorResponse(errorType);
127
+ expect(response.status).toBe(484);
128
+ });
124
129
  });
125
130
 
126
131
  // 测试状态码不在200-599范围内的情况
@@ -59,6 +59,9 @@ const getStatus = (errorType: ILobeAgentRuntimeErrorType | ErrorType) => {
59
59
  case AgentRuntimeErrorType.ZeroOneBizError: {
60
60
  return 483;
61
61
  }
62
+ case AgentRuntimeErrorType.TogetherAIBizError: {
63
+ return 484;
64
+ }
62
65
  }
63
66
  return errorType as number;
64
67
  };
@@ -1,5 +1,5 @@
1
- import { Google, Gemini } from '@lobehub/icons';
2
- import { Input, Divider } from 'antd';
1
+ import { Gemini, Google } from '@lobehub/icons';
2
+ import { Divider, Input } from 'antd';
3
3
  import { memo } from 'react';
4
4
  import { useTranslation } from 'react-i18next';
5
5
  import { Flexbox } from 'react-layout-kit';
@@ -0,0 +1,66 @@
1
+ import { Together } from '@lobehub/icons';
2
+ import { Input } from 'antd';
3
+ import { useTheme } from 'antd-style';
4
+ import { memo } from 'react';
5
+ import { useTranslation } from 'react-i18next';
6
+
7
+ import { ModelProvider } from '@/libs/agent-runtime';
8
+
9
+ import Checker from '../components/Checker';
10
+ import ProviderConfig from '../components/ProviderConfig';
11
+ import { LLMProviderApiTokenKey, LLMProviderConfigKey } from '../const';
12
+
13
+ const providerKey = 'togetherai';
14
+
15
+ const TogetherAIProvider = memo(() => {
16
+ const { t } = useTranslation('setting');
17
+
18
+ const theme = useTheme();
19
+
20
+ return (
21
+ <ProviderConfig
22
+ configItems={[
23
+ {
24
+ children: (
25
+ <Input.Password
26
+ autoComplete={'new-password'}
27
+ placeholder={t('llm.TogetherAI.token.placeholder')}
28
+ />
29
+ ),
30
+ desc: t('llm.TogetherAI.token.desc'),
31
+ label: t('llm.TogetherAI.token.title'),
32
+ name: [LLMProviderConfigKey, providerKey, LLMProviderApiTokenKey],
33
+ },
34
+ {
35
+ children: (
36
+ <Input.TextArea
37
+ allowClear
38
+ placeholder={t('llm.TogetherAI.customModelName.placeholder')}
39
+ style={{ height: 100 }}
40
+ />
41
+ ),
42
+ desc: t('llm.TogetherAI.customModelName.desc'),
43
+ label: t('llm.TogetherAI.customModelName.title'),
44
+ name: [LLMProviderConfigKey, providerKey, 'customModelName'],
45
+ },
46
+ {
47
+ children: (
48
+ <Checker model={'togethercomputer/alpaca-7b'} provider={ModelProvider.TogetherAI} />
49
+ ),
50
+ desc: t('llm.checker.desc'),
51
+ label: t('llm.checker.title'),
52
+ minWidth: '100%',
53
+ },
54
+ ]}
55
+ provider={providerKey}
56
+ title={
57
+ <Together.Combine
58
+ color={theme.isDarkMode ? theme.colorText : Together.colorPrimary}
59
+ size={24}
60
+ />
61
+ }
62
+ />
63
+ );
64
+ });
65
+
66
+ export default TogetherAIProvider;
@@ -18,6 +18,7 @@ import Ollama from './Ollama';
18
18
  import OpenAI from './OpenAI';
19
19
  import OpenRouter from './OpenRouter';
20
20
  import Perplexity from './Perplexity';
21
+ import TogetherAI from './TogetherAI';
21
22
  import ZeroOne from './ZeroOne';
22
23
  import Zhipu from './Zhipu';
23
24
 
@@ -40,6 +41,7 @@ export default memo<{ showOllama: boolean }>(({ showOllama }) => {
40
41
  <Moonshot />
41
42
  <ZeroOne />
42
43
  <Zhipu />
44
+ <TogetherAI />
43
45
  <Footer>
44
46
  <Trans i18nKey="llm.waitingForMore" ns={'setting'}>
45
47
  更多模型正在
@@ -10,6 +10,7 @@ import {
10
10
  OpenAI,
11
11
  OpenRouter,
12
12
  Perplexity,
13
+ Together,
13
14
  ZeroOne,
14
15
  Zhipu,
15
16
  } from '@lobehub/icons';
@@ -84,6 +85,10 @@ const ModelProviderIcon = memo<ModelProviderIconProps>(({ provider }) => {
84
85
  return <ZeroOne size={20} />;
85
86
  }
86
87
 
88
+ case ModelProvider.TogetherAI: {
89
+ return <Together size={20} />;
90
+ }
91
+
87
92
  default: {
88
93
  return null;
89
94
  }
@@ -10,6 +10,7 @@ import OllamaProvider from './ollama';
10
10
  import OpenAIProvider from './openai';
11
11
  import OpenRouterProvider from './openrouter';
12
12
  import PerplexityProvider from './perplexity';
13
+ import TogetherAIProvider from './togetherai';
13
14
  import ZeroOneProvider from './zeroone';
14
15
  import ZhiPuProvider from './zhipu';
15
16
 
@@ -23,6 +24,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
23
24
  MoonshotProvider.chatModels,
24
25
  OllamaProvider.chatModels,
25
26
  OpenRouterProvider.chatModels,
27
+ TogetherAIProvider.chatModels,
26
28
  PerplexityProvider.chatModels,
27
29
  AnthropicProvider.chatModels,
28
30
  ZeroOneProvider.chatModels,
@@ -38,5 +40,6 @@ export { default as OllamaProvider } from './ollama';
38
40
  export { default as OpenAIProvider } from './openai';
39
41
  export { default as OpenRouterProvider } from './openrouter';
40
42
  export { default as PerplexityProvider } from './perplexity';
43
+ export { default as TogetherAIProvider } from './togetherai';
41
44
  export { default as ZeroOneProvider } from './zeroone';
42
45
  export { default as ZhiPuProvider } from './zhipu';
@@ -135,7 +135,7 @@ const Ollama: ModelProviderCard = {
135
135
  hidden: true,
136
136
  id: 'llava',
137
137
  tokens: 4000,
138
- vision: false,
138
+ vision: true,
139
139
  },
140
140
  {
141
141
  displayName: 'LLaVA 13B',
@@ -143,7 +143,7 @@ const Ollama: ModelProviderCard = {
143
143
  hidden: true,
144
144
  id: 'llava:13b',
145
145
  tokens: 4000,
146
- vision: false,
146
+ vision: true,
147
147
  },
148
148
  {
149
149
  displayName: 'LLaVA 34B',
@@ -151,7 +151,7 @@ const Ollama: ModelProviderCard = {
151
151
  hidden: true,
152
152
  id: 'llava:34b',
153
153
  tokens: 4000,
154
- vision: false,
154
+ vision: true,
155
155
  },
156
156
  ],
157
157
  id: 'ollama',
@@ -0,0 +1,86 @@
1
+ import { ModelProviderCard } from '@/types/llm';
2
+
3
+ const TogetherAI: ModelProviderCard = {
4
+ chatModels: [
5
+ {
6
+ displayName: 'Deepseek Coder Instruct (33B)',
7
+ functionCall: false,
8
+ id: 'deepseek-ai/deepseek-coder-33b-instruct',
9
+ tokens: 16_384,
10
+ vision: false,
11
+ },
12
+ {
13
+ displayName: 'Phind Code LLaMA v2 (34B)',
14
+ functionCall: false,
15
+ id: 'Phind/Phind-CodeLlama-34B-v2',
16
+ tokens: 16_384,
17
+ vision: false,
18
+ },
19
+ {
20
+ displayName: 'Gemma Instruct (2B)',
21
+ functionCall: false,
22
+ id: 'google/gemma-2b-it',
23
+ tokens: 8192,
24
+ vision: false,
25
+ },
26
+ {
27
+ displayName: 'LLaMA-2 Chat (13B)',
28
+ functionCall: false,
29
+ id: 'meta-llama/Llama-2-13b-chat-hf',
30
+ tokens: 4096,
31
+ vision: false,
32
+ },
33
+ {
34
+ displayName: '01-ai Yi Chat (34B)',
35
+ functionCall: false,
36
+ id: 'zero-one-ai/Yi-34B-Chat',
37
+ tokens: 4096,
38
+ vision: false,
39
+ },
40
+ {
41
+ displayName: 'Mixtral-8x7B Instruct (46.7B)',
42
+ functionCall: false,
43
+ id: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
44
+ tokens: 32_768,
45
+ vision: false,
46
+ },
47
+ {
48
+ displayName: 'Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)',
49
+ functionCall: false,
50
+ id: 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
51
+ tokens: 32_768,
52
+ vision: false,
53
+ },
54
+ {
55
+ displayName: 'Nous Hermes-2 Yi (34B)',
56
+ functionCall: false,
57
+ id: 'NousResearch/Nous-Hermes-2-Yi-34B',
58
+ tokens: 4096,
59
+ vision: false,
60
+ },
61
+ {
62
+ displayName: 'Qwen 1.5 Chat (7B)',
63
+ functionCall: false,
64
+ id: 'Qwen/Qwen1.5-7B-Chat',
65
+ tokens: 32_768,
66
+ vision: false,
67
+ },
68
+ {
69
+ displayName: 'Qwen 1.5 Chat (14B)',
70
+ functionCall: false,
71
+ id: 'Qwen/Qwen1.5-14B-Chat',
72
+ tokens: 32_768,
73
+ vision: false,
74
+ },
75
+ {
76
+ displayName: 'Qwen 1.5 Chat (72B)',
77
+ functionCall: false,
78
+ id: 'Qwen/Qwen1.5-72B-Chat',
79
+ tokens: 4096,
80
+ vision: false,
81
+ },
82
+ ],
83
+ id: 'togetherai',
84
+ };
85
+
86
+ export default TogetherAI;
@@ -9,7 +9,8 @@ const ZeroOne: ModelProviderCard = {
9
9
  tokens: 4000,
10
10
  },
11
11
  {
12
- description: '支持通用图片问答、图表理解、OCR、视觉推理,能处理高分辨率(1024*1024)的图像,能在复杂视觉任务上提供优秀性能,同时支持多种语言。',
12
+ description:
13
+ '支持通用图片问答、图表理解、OCR、视觉推理,能处理高分辨率(1024*1024)的图像,能在复杂视觉任务上提供优秀性能,同时支持多种语言。',
13
14
  displayName: 'YI Vision Plus',
14
15
  id: 'yi-vl-plus',
15
16
  tokens: 4000,
@@ -50,6 +50,9 @@ declare global {
50
50
  // ZeroOne Provider
51
51
  ZEROONE_API_KEY?: string;
52
52
 
53
+ // TogetherAI Provider
54
+ TOGETHERAI_API_KEY?: string;
55
+
53
56
  // AWS Credentials
54
57
  AWS_REGION?: string;
55
58
  AWS_ACCESS_KEY_ID?: string;
@@ -86,6 +89,8 @@ export const getProviderConfig = () => {
86
89
 
87
90
  const ZEROONE_API_KEY = process.env.ZEROONE_API_KEY || '';
88
91
 
92
+ const TOGETHERAI_API_KEY = process.env.TOGETHERAI_API_KEY || '';
93
+
89
94
  // region format: iad1,sfo1
90
95
  let regions: string[] = [];
91
96
  if (process.env.OPENAI_FUNCTION_REGIONS) {
@@ -121,6 +126,9 @@ export const getProviderConfig = () => {
121
126
  OPENROUTER_API_KEY,
122
127
  OPENROUTER_CUSTOM_MODELS: process.env.OPENROUTER_CUSTOM_MODELS,
123
128
 
129
+ ENABLED_TOGETHERAI: !!TOGETHERAI_API_KEY,
130
+ TOGETHERAI_API_KEY,
131
+
124
132
  ENABLED_MOONSHOT: !!MOONSHOT_API_KEY,
125
133
  MOONSHOT_API_KEY,
126
134
  MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
@@ -96,6 +96,10 @@ export const DEFAULT_LLM_CONFIG: GlobalLLMConfig = {
96
96
  apiKey: '',
97
97
  enabled: false,
98
98
  },
99
+ togetherai: {
100
+ apiKey: '',
101
+ enabled: false,
102
+ },
99
103
  zeroone: {
100
104
  apiKey: '',
101
105
  enabled: false,
@@ -0,0 +1,40 @@
1
+ import { Together } from '@lobehub/icons';
2
+ import { Input } from 'antd';
3
+ import { memo } from 'react';
4
+ import { useTranslation } from 'react-i18next';
5
+
6
+ import { ModelProvider } from '@/libs/agent-runtime';
7
+ import { useGlobalStore } from '@/store/global';
8
+ import { modelProviderSelectors } from '@/store/global/selectors';
9
+
10
+ import { FormAction } from '../style';
11
+
12
+ const TogetherAIForm = memo(() => {
13
+ const { t } = useTranslation('error');
14
+ // const [showProxy, setShow] = useState(false);
15
+
16
+ const [apiKey, setConfig] = useGlobalStore((s) => [
17
+ modelProviderSelectors.togetheraiAPIKey(s),
18
+ s.setModelProviderConfig,
19
+ ]);
20
+
21
+ return (
22
+ <FormAction
23
+ avatar={<Together size={56} />}
24
+ description={t('unlock.apikey.TogetherAI.description')}
25
+ title={t('unlock.apikey.TogetherAI.title')}
26
+ >
27
+ <Input.Password
28
+ autoComplete={'new-password'}
29
+ onChange={(e) => {
30
+ setConfig(ModelProvider.TogetherAI, { apiKey: e.target.value });
31
+ }}
32
+ placeholder={'*********************************'}
33
+ type={'block'}
34
+ value={apiKey}
35
+ />
36
+ </FormAction>
37
+ );
38
+ });
39
+
40
+ export default TogetherAIForm;
@@ -15,6 +15,7 @@ import MoonshotForm from './Moonshot';
15
15
  import OpenAIForm from './OpenAI';
16
16
  import OpenRouterForm from './OpenRouter';
17
17
  import PerplexityForm from './Perplexity';
18
+ import TogetherAIForm from './TogetherAI';
18
19
  import ZeroOneForm from './ZeroOne';
19
20
  import ZhipuForm from './Zhipu';
20
21
 
@@ -66,6 +67,10 @@ const APIKeyForm = memo<APIKeyFormProps>(({ id, provider }) => {
66
67
  return <OpenRouterForm />;
67
68
  }
68
69
 
70
+ case ModelProvider.TogetherAI: {
71
+ return <TogetherAIForm />;
72
+ }
73
+
69
74
  case ModelProvider.ZeroOne: {
70
75
  return <ZeroOneForm />;
71
76
  }
@@ -76,6 +76,7 @@ const ErrorMessageExtra = memo<{ data: ChatMessage }>(({ data }) => {
76
76
  case AgentRuntimeErrorType.InvalidAnthropicAPIKey:
77
77
  case AgentRuntimeErrorType.InvalidGroqAPIKey:
78
78
  case AgentRuntimeErrorType.InvalidOpenRouterAPIKey:
79
+ case AgentRuntimeErrorType.InvalidTogetherAIAPIKey:
79
80
  case AgentRuntimeErrorType.InvalidZeroOneAPIKey:
80
81
  case AgentRuntimeErrorType.NoOpenAIAPIKey: {
81
82
  return <InvalidAPIKey id={data.id} provider={data.error?.body?.provider} />;
@@ -43,6 +43,9 @@ export const AgentRuntimeErrorType = {
43
43
 
44
44
  InvalidOpenRouterAPIKey: 'InvalidOpenRouterAPIKey',
45
45
  OpenRouterBizError: 'OpenRouterBizError',
46
+
47
+ InvalidTogetherAIAPIKey: 'InvalidTogetherAIAPIKey',
48
+ TogetherAIBizError: 'TogetherAIBizError',
46
49
  } as const;
47
50
 
48
51
  export type ILobeAgentRuntimeErrorType =
@@ -11,6 +11,7 @@ export { LobeOllamaAI } from './ollama';
11
11
  export { LobeOpenAI } from './openai';
12
12
  export { LobeOpenRouterAI } from './openrouter';
13
13
  export { LobePerplexityAI } from './perplexity';
14
+ export { LobeTogetherAI } from './togetherai';
14
15
  export * from './types';
15
16
  export { AgentRuntimeError } from './utils/createError';
16
17
  export { LobeZeroOneAI } from './zeroone';
@@ -2,7 +2,7 @@
2
2
  import OpenAI from 'openai';
3
3
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
4
4
 
5
- import { ChatStreamCallbacks } from '@/libs/agent-runtime';
5
+ import { ChatStreamCallbacks, OpenAIChatMessage } from '@/libs/agent-runtime';
6
6
 
7
7
  import * as debugStreamModule from '../utils/debugStream';
8
8
  import { LobeOllamaAI } from './index';
@@ -317,4 +317,49 @@ describe('LobeOllamaAI', () => {
317
317
  });
318
318
  });
319
319
  });
320
+
321
+ describe('private method', () => {
322
+ describe('convertContentToOllamaMessage', () => {
323
+ it('should format message array content of UserMessageContentPart to match ollama api', () => {
324
+ const message: OpenAIChatMessage = {
325
+ role: 'user',
326
+ content: [
327
+ {
328
+ type: 'text',
329
+ text: 'Hello',
330
+ },
331
+ {
332
+ type: 'image_url',
333
+ image_url: {
334
+ detail: 'auto',
335
+ url: 'data:image/png;base64,iVBO...',
336
+ },
337
+ },
338
+ ],
339
+ };
340
+
341
+ const ollamaMessage = instance['convertContentToOllamaMessage'](message);
342
+
343
+ expect(ollamaMessage).toEqual({
344
+ role: 'user',
345
+ content: 'Hello',
346
+ images: ['iVBO...'],
347
+ });
348
+ });
349
+
350
+ it('should not affect string type message content', () => {
351
+ const message: OpenAIChatMessage = {
352
+ role: 'user',
353
+ content: 'Hello',
354
+ };
355
+
356
+ const ollamaMessage = instance['convertContentToOllamaMessage'](message);
357
+
358
+ expect(ollamaMessage).toEqual({
359
+ role: 'user',
360
+ content: 'Hello',
361
+ });
362
+ });
363
+ });
364
+ });
320
365
  });
@@ -1,6 +1,8 @@
1
1
  import { OpenAIStream, StreamingTextResponse } from 'ai';
2
2
  import OpenAI, { ClientOptions } from 'openai';
3
3
 
4
+ import { OllamaChatMessage, OpenAIChatMessage } from '@/libs/agent-runtime';
5
+
4
6
  import { LobeRuntimeAI } from '../BaseAI';
5
7
  import { AgentRuntimeErrorType } from '../error';
6
8
  import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
@@ -8,6 +10,7 @@ import { AgentRuntimeError } from '../utils/createError';
8
10
  import { debugStream } from '../utils/debugStream';
9
11
  import { desensitizeUrl } from '../utils/desensitizeUrl';
10
12
  import { handleOpenAIError } from '../utils/handleOpenAIError';
13
+ import { parseDataUri } from '../utils/uriParser';
11
14
 
12
15
  const DEFAULT_BASE_URL = 'http://127.0.0.1:11434/v1';
13
16
 
@@ -25,6 +28,8 @@ export class LobeOllamaAI implements LobeRuntimeAI {
25
28
 
26
29
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
27
30
  try {
31
+ payload.messages = this.buildOllamaMessages(payload.messages);
32
+
28
33
  const response = await this.client.chat.completions.create(
29
34
  payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
30
35
  );
@@ -73,6 +78,41 @@ export class LobeOllamaAI implements LobeRuntimeAI {
73
78
  });
74
79
  }
75
80
  }
81
+
82
+ private buildOllamaMessages(messages: OpenAIChatMessage[]) {
83
+ return messages.map((message) => this.convertContentToOllamaMessage(message));
84
+ }
85
+
86
+ private convertContentToOllamaMessage = (message: OpenAIChatMessage) => {
87
+ if (typeof message.content === 'string') {
88
+ return message;
89
+ }
90
+
91
+ const ollamaMessage: OllamaChatMessage = {
92
+ content: '',
93
+ role: message.role,
94
+ };
95
+
96
+ for (const content of message.content) {
97
+ switch (content.type) {
98
+ case 'text': {
99
+ // keep latest text input
100
+ ollamaMessage.content = content.text;
101
+ break;
102
+ }
103
+ case 'image_url': {
104
+ const { base64 } = parseDataUri(content.image_url.url);
105
+ if (base64) {
106
+ ollamaMessage.images ??= [];
107
+ ollamaMessage.images.push(base64);
108
+ }
109
+ break;
110
+ }
111
+ }
112
+ }
113
+
114
+ return ollamaMessage;
115
+ };
76
116
  }
77
117
 
78
118
  export default LobeOllamaAI;