@lobehub/chat 0.144.0 → 0.144.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@

 # Changelog

+### [Version 0.144.1](https://github.com/lobehub/lobe-chat/compare/v0.144.0...v0.144.1)
+
+<sup>Released on **2024-03-29**</sup>
+
+#### 🐛 Bug Fixes
+
+- **ollama**: Suppport vision for LLaVA models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **ollama**: Suppport vision for LLaVA models, closes [#1791](https://github.com/lobehub/lobe-chat/issues/1791) ([e2d3de6](https://github.com/lobehub/lobe-chat/commit/e2d3de6))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ## [Version 0.144.0](https://github.com/lobehub/lobe-chat/compare/v0.143.0...v0.144.0)

 <sup>Released on **2024-03-29**</sup>
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "0.144.0",
+  "version": "0.144.1",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -135,7 +135,7 @@ const Ollama: ModelProviderCard = {
       hidden: true,
       id: 'llava',
       tokens: 4000,
-      vision: false,
+      vision: true,
     },
     {
       displayName: 'LLaVA 13B',
@@ -143,7 +143,7 @@ const Ollama: ModelProviderCard = {
       hidden: true,
       id: 'llava:13b',
       tokens: 4000,
-      vision: false,
+      vision: true,
     },
     {
       displayName: 'LLaVA 34B',
@@ -151,7 +151,7 @@ const Ollama: ModelProviderCard = {
       hidden: true,
       id: 'llava:34b',
       tokens: 4000,
-      vision: false,
+      vision: true,
     },
   ],
   id: 'ollama',
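The three hunks above are from the Ollama model provider card (the source file is not named in this diff; the hunk context shows const Ollama: ModelProviderCard). Each one turns on the vision flag for a LLaVA entry, marking the model as image-capable. Assembled only from lines visible in the hunks, one entry now reads roughly as below; ModelProviderCard fields that do not appear in the diff are omitted.

// Sketch of the 'llava:13b' card after this release, using only fields visible
// in the hunks above; other ModelProviderCard fields are omitted.
const llava13b = {
  displayName: 'LLaVA 13B',
  hidden: true, // unchanged in this release
  id: 'llava:13b',
  tokens: 4000, // unchanged in this release
  vision: true, // toggled on in 0.144.1 so the model accepts image input
};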
@@ -2,7 +2,7 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

-import { ChatStreamCallbacks } from '@/libs/agent-runtime';
+import { ChatStreamCallbacks, OpenAIChatMessage } from '@/libs/agent-runtime';

 import * as debugStreamModule from '../utils/debugStream';
 import { LobeOllamaAI } from './index';
@@ -317,4 +317,49 @@ describe('LobeOllamaAI', () => {
       });
     });
   });
+
+  describe('private method', () => {
+    describe('convertContentToOllamaMessage', () => {
+      it('should format message array content of UserMessageContentPart to match ollama api', () => {
+        const message: OpenAIChatMessage = {
+          role: 'user',
+          content: [
+            {
+              type: 'text',
+              text: 'Hello',
+            },
+            {
+              type: 'image_url',
+              image_url: {
+                detail: 'auto',
+                url: 'data:image/png;base64,iVBO...',
+              },
+            },
+          ],
+        };
+
+        const ollamaMessage = instance['convertContentToOllamaMessage'](message);
+
+        expect(ollamaMessage).toEqual({
+          role: 'user',
+          content: 'Hello',
+          images: ['iVBO...'],
+        });
+      });
+
+      it('should not affect string type message content', () => {
+        const message: OpenAIChatMessage = {
+          role: 'user',
+          content: 'Hello',
+        };
+
+        const ollamaMessage = instance['convertContentToOllamaMessage'](message);
+
+        expect(ollamaMessage).toEqual({
+          role: 'user',
+          content: 'Hello',
+        });
+      });
+    });
+  });
 });
@@ -1,6 +1,8 @@
 import { OpenAIStream, StreamingTextResponse } from 'ai';
 import OpenAI, { ClientOptions } from 'openai';

+import { OllamaChatMessage, OpenAIChatMessage } from '@/libs/agent-runtime';
+
 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';
 import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
@@ -8,6 +10,7 @@ import { AgentRuntimeError } from '../utils/createError';
 import { debugStream } from '../utils/debugStream';
 import { desensitizeUrl } from '../utils/desensitizeUrl';
 import { handleOpenAIError } from '../utils/handleOpenAIError';
+import { parseDataUri } from '../utils/uriParser';

 const DEFAULT_BASE_URL = 'http://127.0.0.1:11434/v1';

@@ -25,6 +28,8 @@ export class LobeOllamaAI implements LobeRuntimeAI {

   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
     try {
+      payload.messages = this.buildOllamaMessages(payload.messages);
+
       const response = await this.client.chat.completions.create(
         payload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
       );
@@ -73,6 +78,41 @@ export class LobeOllamaAI implements LobeRuntimeAI {
       });
     }
   }
+
+  private buildOllamaMessages(messages: OpenAIChatMessage[]) {
+    return messages.map((message) => this.convertContentToOllamaMessage(message));
+  }
+
+  private convertContentToOllamaMessage = (message: OpenAIChatMessage) => {
+    if (typeof message.content === 'string') {
+      return message;
+    }
+
+    const ollamaMessage: OllamaChatMessage = {
+      content: '',
+      role: message.role,
+    };
+
+    for (const content of message.content) {
+      switch (content.type) {
+        case 'text': {
+          // keep latest text input
+          ollamaMessage.content = content.text;
+          break;
+        }
+        case 'image_url': {
+          const { base64 } = parseDataUri(content.image_url.url);
+          if (base64) {
+            ollamaMessage.images ??= [];
+            ollamaMessage.images.push(base64);
+          }
+          break;
+        }
+      }
+    }
+
+    return ollamaMessage;
+  };
 }

 export default LobeOllamaAI;
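Note: convertContentToOllamaMessage relies on parseDataUri, imported above from '../utils/uriParser' but not included in this diff. Based only on how it is called here (it must expose a base64 field for a data: URL, which may be absent), a hypothetical stand-in could look like the sketch below; this is an assumption, not the package's actual implementation.

// Hypothetical stand-in for parseDataUri; the real helper ('../utils/uriParser')
// is not part of this diff. All the converter needs is the base64 payload of a
// data: URL, or null when the URL is not a base64 data URI.
const parseDataUriSketch = (uri: string): { base64: string | null; mimeType: string | null } => {
  // e.g. 'data:image/png;base64,iVBORw0...' -> mimeType 'image/png', base64 'iVBORw0...'
  const match = uri.match(/^data:([^;,]+)?;base64,(.+)$/);
  return match ? { base64: match[2], mimeType: match[1] ?? null } : { base64: null, mimeType: null };
};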
@@ -127,3 +127,10 @@ export interface ChatCompletionTool {
 }

 export type ChatStreamCallbacks = OpenAIStreamCallbacks;
+
+export interface OllamaChatMessage extends OpenAIChatMessage {
+  /**
+   * @description images for ollama vision models (https://ollama.com/blog/vision-models)
+   */
+  images?: string[];
+}
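Net effect of the runtime change and the new OllamaChatMessage type: an OpenAI-style message whose content is an array of parts is flattened before it is sent to Ollama, with the text part copied into content and each base64 data URL pushed into images. A small before/after sketch, mirroring the unit test above; the message text is illustrative only.

import { OllamaChatMessage, OpenAIChatMessage } from '@/libs/agent-runtime';

// Illustrative input: an OpenAI-style user message with one text part and one image part.
const openAIMessage: OpenAIChatMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this picture?' },
    { type: 'image_url', image_url: { detail: 'auto', url: 'data:image/png;base64,iVBO...' } },
  ],
};

// Shape produced by convertContentToOllamaMessage for the same message:
const converted: OllamaChatMessage = {
  content: 'What is in this picture?', // the last text part wins ("keep latest text input")
  images: ['iVBO...'], // base64 payload with the 'data:image/png;base64,' prefix stripped
  role: 'user',
};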