@lobehub/chat 0.148.9 → 0.149.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/.env.example +1 -1
  2. package/CHANGELOG.md +50 -0
  3. package/docs/self-hosting/advanced/sso-providers/github.zh-CN.mdx +1 -3
  4. package/docs/self-hosting/environment-variables/model-provider.mdx +1 -1
  5. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +1 -1
  6. package/docs/self-hosting/examples/ollama.mdx +0 -1
  7. package/docs/self-hosting/examples/ollama.zh-CN.mdx +0 -2
  8. package/locales/ar/chat.json +4 -0
  9. package/locales/ar/error.json +1 -1
  10. package/locales/ar/modelProvider.json +27 -1
  11. package/locales/ar/welcome.json +1 -0
  12. package/locales/bg-BG/chat.json +4 -0
  13. package/locales/bg-BG/error.json +1 -1
  14. package/locales/bg-BG/modelProvider.json +27 -1
  15. package/locales/bg-BG/welcome.json +1 -0
  16. package/locales/de-DE/chat.json +4 -0
  17. package/locales/de-DE/error.json +1 -1
  18. package/locales/de-DE/modelProvider.json +27 -1
  19. package/locales/de-DE/welcome.json +1 -0
  20. package/locales/en-US/chat.json +4 -0
  21. package/locales/en-US/error.json +1 -1
  22. package/locales/en-US/modelProvider.json +27 -1
  23. package/locales/en-US/welcome.json +1 -0
  24. package/locales/es-ES/chat.json +4 -0
  25. package/locales/es-ES/error.json +1 -1
  26. package/locales/es-ES/modelProvider.json +27 -1
  27. package/locales/es-ES/welcome.json +1 -0
  28. package/locales/fr-FR/chat.json +4 -0
  29. package/locales/fr-FR/error.json +1 -1
  30. package/locales/fr-FR/modelProvider.json +27 -1
  31. package/locales/fr-FR/welcome.json +1 -0
  32. package/locales/it-IT/chat.json +4 -0
  33. package/locales/it-IT/error.json +1 -1
  34. package/locales/it-IT/modelProvider.json +26 -1
  35. package/locales/it-IT/welcome.json +1 -0
  36. package/locales/ja-JP/chat.json +4 -0
  37. package/locales/ja-JP/error.json +1 -1
  38. package/locales/ja-JP/modelProvider.json +27 -1
  39. package/locales/ja-JP/welcome.json +1 -0
  40. package/locales/ko-KR/chat.json +4 -0
  41. package/locales/ko-KR/error.json +1 -1
  42. package/locales/ko-KR/modelProvider.json +27 -1
  43. package/locales/ko-KR/welcome.json +1 -0
  44. package/locales/nl-NL/chat.json +4 -0
  45. package/locales/nl-NL/error.json +1 -1
  46. package/locales/nl-NL/modelProvider.json +27 -1
  47. package/locales/nl-NL/welcome.json +1 -0
  48. package/locales/pl-PL/chat.json +4 -0
  49. package/locales/pl-PL/error.json +1 -1
  50. package/locales/pl-PL/modelProvider.json +27 -1
  51. package/locales/pl-PL/welcome.json +1 -0
  52. package/locales/pt-BR/chat.json +4 -0
  53. package/locales/pt-BR/error.json +1 -1
  54. package/locales/pt-BR/modelProvider.json +27 -1
  55. package/locales/pt-BR/welcome.json +1 -0
  56. package/locales/ru-RU/chat.json +4 -0
  57. package/locales/ru-RU/error.json +1 -1
  58. package/locales/ru-RU/modelProvider.json +27 -1
  59. package/locales/ru-RU/welcome.json +1 -0
  60. package/locales/tr-TR/chat.json +4 -0
  61. package/locales/tr-TR/error.json +1 -1
  62. package/locales/tr-TR/modelProvider.json +27 -1
  63. package/locales/tr-TR/welcome.json +1 -0
  64. package/locales/vi-VN/chat.json +4 -0
  65. package/locales/vi-VN/error.json +1 -1
  66. package/locales/vi-VN/modelProvider.json +27 -1
  67. package/locales/vi-VN/welcome.json +1 -0
  68. package/locales/zh-CN/chat.json +5 -1
  69. package/locales/zh-CN/error.json +1 -1
  70. package/locales/zh-CN/modelProvider.json +27 -1
  71. package/locales/zh-CN/welcome.json +1 -0
  72. package/locales/zh-TW/chat.json +4 -0
  73. package/locales/zh-TW/error.json +1 -1
  74. package/locales/zh-TW/modelProvider.json +27 -1
  75. package/locales/zh-TW/welcome.json +1 -0
  76. package/package.json +2 -2
  77. package/src/app/api/chat/agentRuntime.test.ts +2 -2
  78. package/src/app/api/config/route.ts +2 -0
  79. package/src/app/settings/llm/Ollama/index.tsx +3 -6
  80. package/src/app/settings/llm/components/ProviderConfig/index.tsx +15 -14
  81. package/src/config/modelProviders/ollama.ts +38 -38
  82. package/src/const/settings/index.ts +1 -0
  83. package/src/features/Conversation/Error/{InvalidOllamaModel → OllamaBizError/InvalidOllamaModel}/index.tsx +1 -1
  84. package/src/features/Conversation/Error/OllamaBizError/SetupGuide.tsx +128 -0
  85. package/src/features/Conversation/Error/{OllamaBizError.tsx → OllamaBizError/index.tsx} +15 -2
  86. package/src/features/ModelSwitchPanel/index.tsx +0 -6
  87. package/src/libs/agent-runtime/ollama/index.ts +42 -57
  88. package/src/libs/agent-runtime/ollama/stream.ts +31 -0
  89. package/src/libs/agent-runtime/ollama/type.ts +8 -0
  90. package/src/libs/agent-runtime/types/chat.ts +0 -7
  91. package/src/libs/agent-runtime/zeroone/index.test.ts +16 -16
  92. package/src/locales/default/error.ts +2 -1
  93. package/src/locales/default/modelProvider.ts +29 -1
  94. package/src/migrations/FromV3ToV4/fixtures/ollama-output-v4.json +0 -1
  95. package/src/services/__tests__/chat.test.ts +1 -1
  96. package/src/services/ollama.ts +6 -9
  97. package/src/store/global/slices/settings/actions/llm.test.ts +0 -1
  98. package/src/store/global/slices/settings/selectors/modelProvider.test.ts +1 -1
  99. package/src/types/serverConfig.ts +1 -1
  100. package/src/libs/agent-runtime/ollama/index.test.ts +0 -365
  101. /package/src/features/Conversation/Error/{InvalidOllamaModel → OllamaBizError/InvalidOllamaModel}/useDownloadMonitor.ts +0 -0
@@ -0,0 +1,128 @@
1
+ import { Highlighter, Snippet } from '@lobehub/ui';
2
+ import { Tab, Tabs } from '@lobehub/ui/mdx';
3
+ import { Steps } from 'antd';
4
+ import { createStyles } from 'antd-style';
5
+ import Link from 'next/link';
6
+ import { memo } from 'react';
7
+ import { Trans, useTranslation } from 'react-i18next';
8
+ import { Flexbox } from 'react-layout-kit';
9
+
10
+ const useStyles = createStyles(({ css, prefixCls }) => ({
11
+ steps: css`
12
+ &.${prefixCls}-steps-small .${prefixCls}-steps-item-title {
13
+ margin-bottom: 16px;
14
+ font-size: 16px;
15
+ font-weight: bold;
16
+ }
17
+ `,
18
+ }));
19
+
20
+ const SetupGuide = memo(() => {
21
+ const { styles } = useStyles();
22
+ const { t } = useTranslation('modelProvider');
23
+ return (
24
+ <Flexbox paddingBlock={8}>
25
+ <Steps
26
+ className={styles.steps}
27
+ direction={'vertical'}
28
+ items={[
29
+ {
30
+ description: (
31
+ <Flexbox>
32
+ {t('ollama.setup.install.description')}
33
+ <Tabs items={['macOS', t('ollama.setup.install.windowsTab'), 'Linux', 'Docker']}>
34
+ <Tab>
35
+ <Trans i18nKey={'ollama.setup.install.macos'} ns={'modelProvider'}>
36
+ <Link href={'https://ollama.com/download'}>下载 Ollama for macOS</Link>
37
+ 并解压。
38
+ </Trans>
39
+ </Tab>
40
+ <Tab>
41
+ <Trans i18nKey={'ollama.setup.install.windows'} ns={'modelProvider'}>
42
+ <Link href={'https://ollama.com/download'}>下载 Ollama for macOS</Link>
43
+ 并解压。
44
+ </Trans>
45
+ </Tab>
46
+ <Tab>
47
+ <Flexbox gap={8}>
48
+ {t('ollama.setup.install.linux.command')}
49
+ <Snippet language={'bash'}>
50
+ curl -fsSL https://ollama.com/install.sh | sh
51
+ </Snippet>
52
+ <div>
53
+ <Trans i18nKey={'ollama.setup.install.linux.manual'} ns={'modelProvider'}>
54
+ 或者,你也可以参考
55
+ <Link href={'https://github.com/ollama/ollama/blob/main/docs/linux.md'}>
56
+ Linux 手动安装指南
57
+ </Link>
58
+
59
+ </Trans>
60
+ </div>
61
+ </Flexbox>
62
+ </Tab>
63
+ <Tab>
64
+ <Flexbox gap={8}>
65
+ {t('ollama.setup.install.docker')}
66
+ <Snippet language={'bash'}>docker pull ollama/ollama</Snippet>
67
+ </Flexbox>
68
+ </Tab>
69
+ </Tabs>
70
+ </Flexbox>
71
+ ),
72
+ status: 'process',
73
+ title: t('ollama.setup.install.title'),
74
+ },
75
+ {
76
+ description: (
77
+ <Flexbox>
78
+ {t('ollama.setup.cors.description')}
79
+
80
+ <Tabs items={['macOS', t('ollama.setup.install.windowsTab'), 'Linux']}>
81
+ <Tab>
82
+ <Flexbox gap={8}>
83
+ {t('ollama.setup.cors.macos')}
84
+ {/* eslint-disable-next-line react/no-unescaped-entities */}
85
+ <Snippet language={'bash'}>launchctl setenv OLLAMA_ORIGINS "*"</Snippet>
86
+ {t('ollama.setup.cors.reboot')}
87
+ </Flexbox>
88
+ </Tab>
89
+ <Tab>
90
+ <Flexbox gap={8}>
91
+ <div>{t('ollama.setup.cors.windows')}</div>
92
+ <div>{t('ollama.setup.cors.reboot')}</div>
93
+ </Flexbox>
94
+ </Tab>
95
+ <Tab>
96
+ {' '}
97
+ <Flexbox gap={8}>
98
+ {t('ollama.setup.cors.linux.systemd')}
99
+ {/* eslint-disable-next-line react/no-unescaped-entities */}
100
+ <Snippet language={'bash'}> sudo systemctl edit ollama.service</Snippet>
101
+ {t('ollama.setup.cors.linux.env')}
102
+ <Highlighter
103
+ // eslint-disable-next-line react/no-children-prop
104
+ children={`[Service]
105
+
106
+ Environment="OLLAMA_ORIGINS=*"`}
107
+ fileName={'ollama.service'}
108
+ fullFeatured
109
+ language={'bash'}
110
+ showLanguage
111
+ />
112
+ {t('ollama.setup.cors.linux.reboot')}
113
+ </Flexbox>
114
+ </Tab>
115
+ </Tabs>
116
+ </Flexbox>
117
+ ),
118
+ status: 'process',
119
+ title: t('ollama.setup.cors.title'),
120
+ },
121
+ ]}
122
+ size={'small'}
123
+ />
124
+ </Flexbox>
125
+ );
126
+ });
127
+
128
+ export default SetupGuide;
@@ -1,9 +1,16 @@
1
+ import { Skeleton } from 'antd';
2
+ import dynamic from 'next/dynamic';
1
3
  import { memo } from 'react';
2
4
 
3
5
  import { ChatMessage } from '@/types/message';
4
6
 
5
- import ErrorJsonViewer from './ErrorJsonViewer';
6
- import InvalidModel from './InvalidOllamaModel';
7
+ import ErrorJsonViewer from '../ErrorJsonViewer';
8
+
9
+ const loading = () => <Skeleton active style={{ width: 300 }} />;
10
+
11
+ const SetupGuide = dynamic(() => import('./SetupGuide'), { loading, ssr: false });
12
+
13
+ const InvalidModel = dynamic(() => import('./InvalidOllamaModel'), { loading, ssr: false });
7
14
 
8
15
  interface OllamaError {
9
16
  code: string | null;
@@ -23,11 +30,17 @@ const OllamaBizError = memo<ChatMessage>(({ error, id }) => {
23
30
 
24
31
  const errorMessage = errorBody.error?.message;
25
32
 
33
+ // error of not pull the model
26
34
  const unresolvedModel = errorMessage?.match(UNRESOLVED_MODEL_REGEXP)?.[1];
27
35
  if (unresolvedModel) {
28
36
  return <InvalidModel id={id} model={unresolvedModel} />;
29
37
  }
30
38
 
39
+ // error of not enable model or not set the CORS rules
40
+ if (errorMessage?.includes('Failed to fetch')) {
41
+ return <SetupGuide />;
42
+ }
43
+
31
44
  return <ErrorJsonViewer error={error} id={id} />;
32
45
  });
33
46
 
@@ -79,12 +79,6 @@ const ModelSwitchPanel = memo<PropsWithChildren>(({ children }) => {
79
79
  return items;
80
80
  };
81
81
 
82
- // If there is only one provider, just remove the group, show model directly
83
- if (enabledList.length === 1) {
84
- const provider = enabledList[0];
85
- return getModelItems(provider);
86
- }
87
-
88
82
  // otherwise show with provider group
89
83
  return enabledList.map((provider) => ({
90
84
  children: getModelItems(provider),
@@ -1,94 +1,79 @@
1
- import { OpenAIStream, StreamingTextResponse } from 'ai';
2
- import OpenAI, { ClientOptions } from 'openai';
1
+ import { StreamingTextResponse } from 'ai';
2
+ import { Ollama } from 'ollama/browser';
3
+ import { ClientOptions } from 'openai';
3
4
 
4
- import { OllamaChatMessage, OpenAIChatMessage } from '@/libs/agent-runtime';
5
+ import { OpenAIChatMessage } from '@/libs/agent-runtime';
6
+ import { OllamaStream } from '@/libs/agent-runtime/ollama/stream';
5
7
 
6
8
  import { LobeRuntimeAI } from '../BaseAI';
7
9
  import { AgentRuntimeErrorType } from '../error';
8
10
  import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
9
11
  import { AgentRuntimeError } from '../utils/createError';
10
- import { debugStream } from '../utils/debugStream';
11
- import { desensitizeUrl } from '../utils/desensitizeUrl';
12
- import { handleOpenAIError } from '../utils/handleOpenAIError';
13
12
  import { parseDataUri } from '../utils/uriParser';
14
-
15
- const DEFAULT_BASE_URL = 'http://127.0.0.1:11434/v1';
13
+ import { OllamaMessage } from './type';
16
14
 
17
15
  export class LobeOllamaAI implements LobeRuntimeAI {
18
- private client: OpenAI;
16
+ private client: Ollama;
19
17
 
20
- baseURL: string;
18
+ baseURL?: string;
21
19
 
22
- constructor({ apiKey = 'ollama', baseURL = DEFAULT_BASE_URL, ...res }: ClientOptions) {
23
- if (!baseURL) throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs);
20
+ constructor({ baseURL }: ClientOptions) {
21
+ try {
22
+ if (baseURL) new URL(baseURL);
23
+ } catch {
24
+ throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidOllamaArgs);
25
+ }
24
26
 
25
- this.client = new OpenAI({ apiKey, baseURL, ...res });
26
- this.baseURL = baseURL;
27
+ this.client = new Ollama(!baseURL ? undefined : { host: new URL(baseURL).host });
28
+
29
+ if (baseURL) this.baseURL = baseURL;
27
30
  }
28
31
 
29
32
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
30
33
  try {
31
- payload.messages = this.buildOllamaMessages(payload.messages);
32
-
33
- const response = await this.client.chat.completions.create(
34
- payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
35
- );
36
- const [prod, debug] = response.tee();
37
-
38
- if (process.env.DEBUG_OLLAMA_CHAT_COMPLETION === '1') {
39
- debugStream(debug.toReadableStream()).catch(console.error);
40
- }
34
+ const response = await this.client.chat({
35
+ messages: this.buildOllamaMessages(payload.messages),
36
+ model: payload.model,
37
+ options: {
38
+ frequency_penalty: payload.frequency_penalty,
39
+ presence_penalty: payload.presence_penalty,
40
+ temperature: payload.temperature,
41
+ top_p: payload.top_p,
42
+ },
43
+ stream: true,
44
+ });
41
45
 
42
- return new StreamingTextResponse(OpenAIStream(prod, options?.callback), {
46
+ return new StreamingTextResponse(OllamaStream(response, options?.callback), {
43
47
  headers: options?.headers,
44
48
  });
45
49
  } catch (error) {
46
- let desensitizedEndpoint = this.baseURL;
47
-
48
- if (this.baseURL !== DEFAULT_BASE_URL) {
49
- desensitizedEndpoint = desensitizeUrl(this.baseURL);
50
- }
51
-
52
- if ('status' in (error as any)) {
53
- switch ((error as Response).status) {
54
- case 401: {
55
- throw AgentRuntimeError.chat({
56
- endpoint: desensitizedEndpoint,
57
- error: error as any,
58
- errorType: AgentRuntimeErrorType.InvalidOllamaArgs,
59
- provider: ModelProvider.Ollama,
60
- });
61
- }
62
-
63
- default: {
64
- break;
65
- }
66
- }
67
- }
68
-
69
- const { errorResult, RuntimeError } = handleOpenAIError(error);
70
-
71
- const errorType = RuntimeError || AgentRuntimeErrorType.OllamaBizError;
50
+ const e = error as { message: string; name: string; status_code: number };
72
51
 
73
52
  throw AgentRuntimeError.chat({
74
- endpoint: desensitizedEndpoint,
75
- error: errorResult,
76
- errorType,
53
+ error: { message: e.message, name: e.name, status_code: e.status_code },
54
+ errorType: AgentRuntimeErrorType.OllamaBizError,
77
55
  provider: ModelProvider.Ollama,
78
56
  });
79
57
  }
80
58
  }
81
59
 
60
+ // async models(): Promise<ChatModelCard[]> {
61
+ // const list = await this.client.list();
62
+ // return list.models.map((model) => ({
63
+ // id: model.name,
64
+ // }));
65
+ // }
66
+
82
67
  private buildOllamaMessages(messages: OpenAIChatMessage[]) {
83
68
  return messages.map((message) => this.convertContentToOllamaMessage(message));
84
69
  }
85
70
 
86
- private convertContentToOllamaMessage = (message: OpenAIChatMessage) => {
71
+ private convertContentToOllamaMessage = (message: OpenAIChatMessage): OllamaMessage => {
87
72
  if (typeof message.content === 'string') {
88
- return message;
73
+ return { content: message.content, role: message.role };
89
74
  }
90
75
 
91
- const ollamaMessage: OllamaChatMessage = {
76
+ const ollamaMessage: OllamaMessage = {
92
77
  content: '',
93
78
  role: message.role,
94
79
  };
@@ -0,0 +1,31 @@
1
+ // copy from https://github.com/vercel/ai/discussions/539#discussioncomment-8193721
2
+ // and I have remove the unnecessary code
3
+ import {
4
+ type AIStreamCallbacksAndOptions,
5
+ createCallbacksTransformer,
6
+ createStreamDataTransformer,
7
+ readableFromAsyncIterable,
8
+ } from 'ai';
9
+ import { ChatResponse } from 'ollama/browser';
10
+
11
+ // A modified version of the streamable function specifically for chat messages
12
+ const chatStreamable = async function* (stream: AsyncIterable<ChatResponse>) {
13
+ for await (const response of stream) {
14
+ if (response.message) {
15
+ yield response.message;
16
+ }
17
+ if (response.done) {
18
+ // Additional final response data can be handled here if necessary
19
+ return;
20
+ }
21
+ }
22
+ };
23
+
24
+ export const OllamaStream = (
25
+ res: AsyncIterable<ChatResponse>,
26
+ cb?: AIStreamCallbacksAndOptions,
27
+ ): ReadableStream<string> => {
28
+ return readableFromAsyncIterable(chatStreamable(res))
29
+ .pipeThrough(createCallbacksTransformer(cb) as any)
30
+ .pipeThrough(createStreamDataTransformer(cb?.experimental_streamData));
31
+ };
@@ -0,0 +1,8 @@
1
+ /**
2
+ * @description images for ollama vision models (https://ollama.com/blog/vision-models)
3
+ */
4
+ export interface OllamaMessage {
5
+ content: string;
6
+ images?: string[];
7
+ role: string;
8
+ }
@@ -127,10 +127,3 @@ export interface ChatCompletionTool {
127
127
  }
128
128
 
129
129
  export type ChatStreamCallbacks = OpenAIStreamCallbacks;
130
-
131
- export interface OllamaChatMessage extends OpenAIChatMessage {
132
- /**
133
- * @description images for ollama vision models (https://ollama.com/blog/vision-models)
134
- */
135
- images?: string[];
136
- }
@@ -50,7 +50,7 @@ describe('LobeZeroOneAI', () => {
50
50
  // Act
51
51
  const result = await instance.chat({
52
52
  messages: [{ content: 'Hello', role: 'user' }],
53
- model: 'mistralai/mistral-7b-instruct:free',
53
+ model: 'yi-34b-chat-0205',
54
54
  temperature: 0,
55
55
  });
56
56
 
@@ -69,7 +69,7 @@ describe('LobeZeroOneAI', () => {
69
69
  const result = await instance.chat({
70
70
  max_tokens: 1024,
71
71
  messages: [{ content: 'Hello', role: 'user' }],
72
- model: 'mistralai/mistral-7b-instruct:free',
72
+ model: 'yi-34b-chat-0205',
73
73
  temperature: 0.7,
74
74
  top_p: 1,
75
75
  });
@@ -79,7 +79,7 @@ describe('LobeZeroOneAI', () => {
79
79
  {
80
80
  max_tokens: 1024,
81
81
  messages: [{ content: 'Hello', role: 'user' }],
82
- model: 'mistralai/mistral-7b-instruct:free',
82
+ model: 'yi-34b-chat-0205',
83
83
  temperature: 0.7,
84
84
  top_p: 1,
85
85
  },
@@ -89,7 +89,7 @@ describe('LobeZeroOneAI', () => {
89
89
  });
90
90
 
91
91
  describe('Error', () => {
92
- it('should return OpenRouterBizError with an openai error response when OpenAI.APIError is thrown', async () => {
92
+ it('should return ZeroOneBizError with an openai error response when OpenAI.APIError is thrown', async () => {
93
93
  // Arrange
94
94
  const apiError = new OpenAI.APIError(
95
95
  400,
@@ -109,7 +109,7 @@ describe('LobeZeroOneAI', () => {
109
109
  try {
110
110
  await instance.chat({
111
111
  messages: [{ content: 'Hello', role: 'user' }],
112
- model: 'mistralai/mistral-7b-instruct:free',
112
+ model: 'yi-34b-chat-0205',
113
113
  temperature: 0,
114
114
  });
115
115
  } catch (e) {
@@ -125,7 +125,7 @@ describe('LobeZeroOneAI', () => {
125
125
  }
126
126
  });
127
127
 
128
- it('should throw AgentRuntimeError with InvalidOpenRouterAPIKey if no apiKey is provided', async () => {
128
+ it('should throw AgentRuntimeError with InvalidZeroOneAPIKey if no apiKey is provided', async () => {
129
129
  try {
130
130
  new LobeZeroOneAI({});
131
131
  } catch (e) {
@@ -133,7 +133,7 @@ describe('LobeZeroOneAI', () => {
133
133
  }
134
134
  });
135
135
 
136
- it('should return OpenRouterBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
136
+ it('should return ZeroOneBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
137
137
  // Arrange
138
138
  const errorInfo = {
139
139
  stack: 'abc',
@@ -149,7 +149,7 @@ describe('LobeZeroOneAI', () => {
149
149
  try {
150
150
  await instance.chat({
151
151
  messages: [{ content: 'Hello', role: 'user' }],
152
- model: 'mistralai/mistral-7b-instruct:free',
152
+ model: 'yi-34b-chat-0205',
153
153
  temperature: 0,
154
154
  });
155
155
  } catch (e) {
@@ -165,7 +165,7 @@ describe('LobeZeroOneAI', () => {
165
165
  }
166
166
  });
167
167
 
168
- it('should return OpenRouterBizError with an cause response with desensitize Url', async () => {
168
+ it('should return ZeroOneBizError with an cause response with desensitize Url', async () => {
169
169
  // Arrange
170
170
  const errorInfo = {
171
171
  stack: 'abc',
@@ -185,7 +185,7 @@ describe('LobeZeroOneAI', () => {
185
185
  try {
186
186
  await instance.chat({
187
187
  messages: [{ content: 'Hello', role: 'user' }],
188
- model: 'mistralai/mistral-7b-instruct:free',
188
+ model: 'yi-34b-chat-0205',
189
189
  temperature: 0,
190
190
  });
191
191
  } catch (e) {
@@ -201,7 +201,7 @@ describe('LobeZeroOneAI', () => {
201
201
  }
202
202
  });
203
203
 
204
- it('should throw an InvalidOpenRouterAPIKey error type on 401 status code', async () => {
204
+ it('should throw an InvalidZeroOneAPIKey error type on 401 status code', async () => {
205
205
  // Mock the API call to simulate a 401 error
206
206
  const error = new Error('Unauthorized') as any;
207
207
  error.status = 401;
@@ -210,7 +210,7 @@ describe('LobeZeroOneAI', () => {
210
210
  try {
211
211
  await instance.chat({
212
212
  messages: [{ content: 'Hello', role: 'user' }],
213
- model: 'mistralai/mistral-7b-instruct:free',
213
+ model: 'yi-34b-chat-0205',
214
214
  temperature: 0,
215
215
  });
216
216
  } catch (e) {
@@ -234,7 +234,7 @@ describe('LobeZeroOneAI', () => {
234
234
  try {
235
235
  await instance.chat({
236
236
  messages: [{ content: 'Hello', role: 'user' }],
237
- model: 'mistralai/mistral-7b-instruct:free',
237
+ model: 'yi-34b-chat-0205',
238
238
  temperature: 0,
239
239
  });
240
240
  } catch (e) {
@@ -265,7 +265,7 @@ describe('LobeZeroOneAI', () => {
265
265
  id: 'chatcmpl-8xDx5AETP8mESQN7UB30GxTN2H1SO',
266
266
  object: 'chat.completion.chunk',
267
267
  created: 1709125675,
268
- model: 'mistralai/mistral-7b-instruct:free',
268
+ model: 'yi-34b-chat-0205',
269
269
  system_fingerprint: 'fp_86156a94a0',
270
270
  choices: [
271
271
  { index: 0, delta: { content: 'hello' }, logprobs: null, finish_reason: null },
@@ -287,7 +287,7 @@ describe('LobeZeroOneAI', () => {
287
287
  const result = await instance.chat(
288
288
  {
289
289
  messages: [{ content: 'Hello', role: 'user' }],
290
- model: 'mistralai/mistral-7b-instruct:free',
290
+ model: 'yi-34b-chat-0205',
291
291
  temperature: 0,
292
292
  },
293
293
  { callback: mockCallback, headers: mockHeaders },
@@ -335,7 +335,7 @@ describe('LobeZeroOneAI', () => {
335
335
  // 假设的测试函数调用,你可能需要根据实际情况调整
336
336
  await instance.chat({
337
337
  messages: [{ content: 'Hello', role: 'user' }],
338
- model: 'mistralai/mistral-7b-instruct:free',
338
+ model: 'yi-34b-chat-0205',
339
339
  temperature: 0,
340
340
  });
341
341
 
@@ -96,7 +96,8 @@ export default {
96
96
 
97
97
  InvalidOllamaArgs: 'Ollama 配置不正确,请检查 Ollama 配置后重试',
98
98
  OllamaBizError: '请求 Ollama 服务出错,请根据以下信息排查或重试',
99
- OllamaServiceUnavailable: '未检测到 Ollama 服务,请检查是否正常启动',
99
+ OllamaServiceUnavailable:
100
+ 'Ollama 服务连接失败,请检查 Ollama 是否运行正常,或是否正确设置 Ollama 的跨域配置',
100
101
 
101
102
  AgentRuntimeError: 'Lobe AI Runtime 执行出错,请根据以下信息排查或重试',
102
103
  /* eslint-enable */
@@ -132,9 +132,37 @@ export default {
132
132
  },
133
133
  endpoint: {
134
134
  desc: '填入 Ollama 接口代理地址,本地未额外指定可留空',
135
- placeholder: 'http://127.0.0.1:11434/v1',
135
+ placeholder: 'http://127.0.0.1:11434',
136
136
  title: '接口代理地址',
137
137
  },
138
+ setup: {
139
+ cors: {
140
+ description: '因浏览器安全限制,你需要为 Ollama 进行跨域配置后方可正常使用。',
141
+ linux: {
142
+ env: '在 [Service] 部分下添加 `Environment`,添加 OLLAMA_ORIGINS 环境变量:',
143
+ reboot: '重载 systemd 并重启 Ollama',
144
+ systemd: '调用 systemd 编辑 ollama 服务:',
145
+ },
146
+ macos: '请打开「终端」应用程序,并粘贴以下指令,并按回车运行',
147
+ reboot: '请在执行完成后重启 Ollama 服务',
148
+ title: '配置 Ollama 允许跨域访问',
149
+ windows:
150
+ '在 Windows 上,点击「控制面板」,进入编辑系统环境变量。为您的用户账户新建名为 「OLLAMA_ORIGINS」 的环境变量,值为 * ,点击 「OK/应用」 保存',
151
+ },
152
+ install: {
153
+ description: '请确认你已经开启 Ollama ,如果没有下载 Ollama ,请前往官网下载',
154
+ docker:
155
+ '如果你更倾向于使用 Docker,Ollama 也提供了官方 Docker 镜像,你可以通过以下命令拉取:',
156
+ linux: {
157
+ command: '通过以下命令安装:',
158
+ manual: '或者,你也可以参考 <1>Linux 手动安装指南</1> 自行安装',
159
+ },
160
+ macos: '<0>下载 macOS 版 Ollama</0>,解压并安装',
161
+ title: '在本地安装并开启 Ollama 应用',
162
+ windows: '<0>下载 Windows 版 Ollama</0>,解压并安装',
163
+ windowsTab: 'Windows (预览版)',
164
+ },
165
+ },
138
166
  title: 'Ollama',
139
167
  },
140
168
  openai: {
@@ -47,7 +47,6 @@
47
47
  {
48
48
  "displayName": "LLaVA 7B",
49
49
  "enabled": true,
50
- "functionCall": false,
51
50
  "id": "llava",
52
51
  "tokens": 4000,
53
52
  "vision": true
@@ -771,7 +771,7 @@ describe('AgentRuntimeOnClient', () => {
771
771
  settings: {
772
772
  languageModel: {
773
773
  ollama: {
774
- endpoint: 'user-ollama-endpoint',
774
+ endpoint: 'http://127.0.0.1:1234',
775
775
  },
776
776
  },
777
777
  },
@@ -7,7 +7,7 @@ import { modelConfigSelectors } from '@/store/global/selectors';
7
7
  import { ChatErrorType } from '@/types/fetch';
8
8
  import { getMessageError } from '@/utils/fetch';
9
9
 
10
- const DEFAULT_BASE_URL = 'http://127.0.0.1:11434/v1';
10
+ const DEFAULT_BASE_URL = 'http://127.0.0.1:11434';
11
11
 
12
12
  interface OllamaServiceParams {
13
13
  fetch?: typeof fetch;
@@ -51,14 +51,13 @@ export class OllamaService {
51
51
  } catch {
52
52
  response = createErrorResponse(ChatErrorType.OllamaServiceUnavailable, {
53
53
  host: this.getHost(),
54
- message: 'please check whether your ollama service is available',
54
+ message: 'please check whether your ollama service is available or set the CORS rules',
55
55
  provider: ModelProvider.Ollama,
56
56
  });
57
57
  }
58
58
 
59
59
  if (!response.ok) {
60
- const messageError = await getMessageError(response);
61
- throw messageError;
60
+ throw await getMessageError(response);
62
61
  }
63
62
  return response.json();
64
63
  };
@@ -66,19 +65,17 @@ export class OllamaService {
66
65
  getModels = async (): Promise<ListResponse> => {
67
66
  let response: Response | ListResponse;
68
67
  try {
69
- const response = await this.getOllamaClient().list();
70
- return response;
68
+ return await this.getOllamaClient().list();
71
69
  } catch {
72
70
  response = createErrorResponse(ChatErrorType.OllamaServiceUnavailable, {
73
71
  host: this.getHost(),
74
- message: 'please check whether your ollama service is available',
72
+ message: 'please check whether your ollama service is available or set the CORS rules',
75
73
  provider: ModelProvider.Ollama,
76
74
  });
77
75
  }
78
76
 
79
77
  if (!response.ok) {
80
- const messageError = await getMessageError(response);
81
- throw messageError;
78
+ throw await getMessageError(response);
82
79
  }
83
80
  return response.json();
84
81
  };
@@ -110,7 +110,6 @@ describe('LLMSettingsSliceAction', () => {
110
110
  // Assert that setModelProviderConfig was not called
111
111
  expect(ollamaList?.chatModels.find((c) => c.id === 'llava')).toEqual({
112
112
  displayName: 'LLaVA 7B',
113
- functionCall: false,
114
113
  enabled: true,
115
114
  id: 'llava',
116
115
  tokens: 4000,
@@ -85,7 +85,7 @@ describe('modelProviderSelectors', () => {
85
85
  });
86
86
 
87
87
  describe('modelEnabledFiles', () => {
88
- it.skip('should return false if the model does not have file ability', () => {
88
+ it('should return false if the model does not have file ability', () => {
89
89
  const enabledFiles = modelProviderSelectors.isModelEnabledFiles('gpt-4-vision-preview')(
90
90
  useGlobalStore.getState(),
91
91
  );
@@ -5,8 +5,8 @@ import { GlobalDefaultAgent, GlobalLLMProviderKey } from '@/types/settings';
5
5
 
6
6
  export interface ServerModelProviderConfig {
7
7
  enabled?: boolean;
8
-
9
8
  enabledModels?: string[];
9
+ fetchOnClient?: boolean;
10
10
  /**
11
11
  * the model cards defined in server
12
12
  */