@lobehub/chat 0.134.1 → 0.135.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
  
  # Changelog
  
+ ## [Version 0.135.0](https://github.com/lobehub/lobe-chat/compare/v0.134.1...v0.135.0)
+
+ <sup>Released on **2024-03-14**</sup>
+
+ #### ✨ Features
+
+ - **misc**: Add claude 3 to bedrock provider.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's improved
+
+ - **misc**: Add claude 3 to bedrock provider, closes [#1551](https://github.com/lobehub/lobe-chat/issues/1551) ([6e1fe33](https://github.com/lobehub/lobe-chat/commit/6e1fe33))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 0.134.1](https://github.com/lobehub/lobe-chat/compare/v0.134.0...v0.134.1)
  
  <sup>Released on **2024-03-13**</sup>
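
The headline change is Claude 3 support on the Bedrock provider. As a rough usage sketch of the runtime this diff modifies: the constructor parameters and `chat` payload shape are taken from the test file added further down, while the import path is illustrative (the class is package-internal, not a published entry point) and the model ID is one of the newly added Bedrock cards.

```ts
// Sketch only: LobeBedrockAI is internal to @lobehub/chat, so this
// import path is illustrative rather than a public API.
import { LobeBedrockAI } from './libs/agent-runtime/bedrock';

const runtime = new LobeBedrockAI({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  accessKeySecret: process.env.AWS_SECRET_ACCESS_KEY,
  region: 'us-west-2',
});

// One of the Bedrock model IDs added in this release.
const response = await runtime.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'anthropic.claude-3-sonnet-20240229-v1:0',
  temperature: 0.5,
});
```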
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "0.134.1",
+ "version": "0.135.0",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -81,7 +81,7 @@
  "@ant-design/icons": "^5",
  "@anthropic-ai/sdk": "^0.17.0",
  "@auth/core": "latest",
- "@aws-sdk/client-bedrock-runtime": "^3.503.1",
+ "@aws-sdk/client-bedrock-runtime": "^3.525.0",
  "@azure/openai": "^1.0.0-beta.11",
  "@cfworker/json-schema": "^1",
  "@google/generative-ai": "^0.2.0",
@@ -24,8 +24,7 @@ const Anthropic: ModelProviderCard = {
  description:
  'Fastest and most compact model for near-instant responsiveness. Quick and accurate targeted performance',
  displayName: 'Claude 3 Haiku',
- hidden: true,
- id: 'claude-3-haiku-20240229',
+ id: 'claude-3-haiku-20240307',
  maxOutput: 4096,
  tokens: 200_000,
  vision: true,
@@ -12,18 +12,34 @@ const Bedrock: ModelProviderCard = {
  },
  {
  description:
- 'Claude Instant 1.2 v1.2, with a 100k context window. A faster and cheaper yet still very capable model that can handle a range of tasks including casual dialogue.',
- displayName: 'Claude Instant 1.2',
- id: 'anthropic.claude-instant-v1',
- tokens: 100_000,
+ 'Claude 3 Sonnet from Anthropic strikes an ideal balance between intelligence and speed, especially for enterprise workloads. It offers maximum utility at a lower price than competitors, and is engineered to be the dependable, high-endurance backbone for large-scale AI deployments. Claude 3 Sonnet can process images and return text output, and offers a 200K context window.',
+ displayName: 'Claude 3 Sonnet',
+ id: 'anthropic.claude-3-sonnet-20240229-v1:0',
+ tokens: 200_000,
+ vision: true,
+ },
+ {
+ description:
+ 'Claude 3 Haiku is the fastest and most compact model from Anthropic, offering near-instant responsiveness. It can quickly answer simple queries and requests, enabling customers to build seamless AI experiences that mimic human interaction. Claude 3 Haiku can process images and return text output, and offers a 200K context window.',
+ displayName: 'Claude 3 Haiku',
+ id: 'anthropic.claude-3-haiku-20240307-v1:0',
+ tokens: 200_000,
+ vision: true,
  },
  {
  description:
- 'Claude 2.1 v2.1, with a 200k context window. An updated version of Claude 2 featuring double the context window and improvements in reliability and other areas.',
+ 'Claude 2.1 v2.1, with a 200k context window. An updated version of Claude 2 with double the context window and improved reliability, lower hallucination rates, and better evidence-based accuracy over long documents and RAG contexts.',
  displayName: 'Claude 2.1',
  id: 'anthropic.claude-v2:1',
  tokens: 200_000,
  },
+ {
+ description:
+ 'Claude Instant 1.2 v1.2, with a 100k context window. A faster, more affordable, yet still very capable model that can handle a range of tasks including casual dialogue, text analysis, summarization, and document question answering.',
+ displayName: 'Claude Instant 1.2',
+ id: 'anthropic.claude-instant-v1',
+ tokens: 100_000,
+ },
  {
  description: 'Llama 2 Chat 13B v1, with a 4k context window; a variant of the Llama 2 model optimized for dialogue use cases.',
  displayName: 'Llama 2 Chat 13B',
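
For orientation, each entry above is a provider model card; reading the field meanings from this diff (the card type itself, `ModelProviderCard`, is defined elsewhere in the package), an annotated copy of the new Claude 3 Haiku entry looks like this:

```ts
// Field meanings as used in the cards above. `tokens` is the context
// window size, `vision` marks image-input support, and Bedrock model
// IDs carry a trailing `-v1:0` version suffix.
const claude3Haiku = {
  description: '...', // user-facing text shown in the model picker
  displayName: 'Claude 3 Haiku',
  id: 'anthropic.claude-3-haiku-20240307-v1:0', // Bedrock model identifier
  tokens: 200_000, // 200K context window
  vision: true, // accepts images, returns text
};
```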
@@ -9,14 +9,12 @@ import { AgentRuntimeErrorType } from '../error';
  import {
  ChatCompetitionOptions,
  ChatStreamPayload,
- ModelProvider,
- OpenAIChatMessage,
- UserMessageContentPart,
+ ModelProvider
  } from '../types';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
  import { desensitizeUrl } from '../utils/desensitizeUrl';
- import { parseDataUri } from '../utils/uriParser';
+ import { buildAnthropicMessages } from '../utils/anthropicHelpers';
  
  const DEFAULT_BASE_URL = 'https://api.anthropic.com';
  
@@ -32,40 +30,22 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
  this.baseURL = this.client.baseURL;
  }
  
- private buildAnthropicMessages = (
- messages: OpenAIChatMessage[],
- ): Anthropic.Messages.MessageParam[] =>
- messages.map((message) => this.convertToAnthropicMessage(message));
-
- private convertToAnthropicMessage = (
- message: OpenAIChatMessage,
- ): Anthropic.Messages.MessageParam => {
- const content = message.content as string | UserMessageContentPart[];
-
- return {
- content:
- typeof content === 'string' ? content : content.map((c) => this.convertToAnthropicBlock(c)),
- role: message.role === 'function' || message.role === 'system' ? 'assistant' : message.role,
- };
- };
-
  async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
  const { messages, model, max_tokens, temperature, top_p } = payload;
  const system_message = messages.find((m) => m.role === 'system');
  const user_messages = messages.filter((m) => m.role !== 'system');
  
- const requestParams: Anthropic.MessageCreateParams = {
- max_tokens: max_tokens || 4096,
- messages: this.buildAnthropicMessages(user_messages),
- model: model,
- stream: true,
- system: system_message?.content as string,
- temperature: temperature,
- top_p: top_p,
- };
-
  try {
- const response = await this.client.messages.create(requestParams);
+ const response = await this.client.messages.create({
+ max_tokens: max_tokens || 4096,
+ messages: buildAnthropicMessages(user_messages),
+ model: model,
+ stream: true,
+ system: system_message?.content as string,
+ temperature: temperature,
+ top_p: top_p,
+ });
+
  const [prod, debug] = response.tee();
  
  if (process.env.DEBUG_ANTHROPIC_CHAT_COMPLETION === '1') {
@@ -105,29 +85,6 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
  });
  }
  }
-
- private convertToAnthropicBlock(
- content: UserMessageContentPart,
- ): Anthropic.ContentBlock | Anthropic.ImageBlockParam {
- switch (content.type) {
- case 'text': {
- return content;
- }
-
- case 'image_url': {
- const { mimeType, base64 } = parseDataUri(content.image_url.url);
-
- return {
- source: {
- data: base64 as string,
- media_type: mimeType as Anthropic.ImageBlockParam.Source['media_type'],
- type: 'base64',
- },
- type: 'image',
- };
- }
- }
- }
  }
  
  export default LobeAnthropicAI;
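
Taken together, these hunks are a behavior-preserving refactor: the private message-conversion methods move out of `LobeAnthropicAI` into a shared `anthropicHelpers` module (added at the bottom of this diff), so the Bedrock runtime can reuse the same OpenAI-to-Anthropic conversion. A minimal sketch of the shared entry point, mirroring the helper file below:

```ts
import { buildAnthropicMessages } from '../utils/anthropicHelpers';

// Callers filter the system message out first and pass it separately via
// `system`; the remaining turns map onto Anthropic MessageParam objects.
const anthropicMessages = buildAnthropicMessages([
  { content: 'Hello', role: 'user' },
  { content: 'Hi there!', role: 'assistant' },
]);
```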
@@ -0,0 +1,217 @@
+ // @vitest-environment node
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+ InvokeModelWithResponseStreamCommand,
+ } from '@aws-sdk/client-bedrock-runtime';
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeBedrockAI } from './index';
+
+ const provider = 'bedrock';
+
+ // Mock the console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ vi.mock("@aws-sdk/client-bedrock-runtime", async (importOriginal) => {
+ const module = await importOriginal();
+ return {
+ ...(module as any),
+ InvokeModelWithResponseStreamCommand: vi.fn()
+ }
+ })
+
+ let instance: LobeBedrockAI;
+
+ beforeEach(() => {
+ instance = new LobeBedrockAI({
+ region: 'us-west-2',
+ accessKeyId: 'test-access-key-id',
+ accessKeySecret: 'test-access-key-secret',
+ });
+
+ vi.spyOn(instance['client'], 'send').mockReturnValue(new ReadableStream() as any);
+ });
+
+ afterEach(() => {
+ vi.clearAllMocks();
+ });
+
+ describe('LobeBedrockAI', () => {
+ describe('init', () => {
+ it('should correctly initialize with AWS credentials', async () => {
+ const instance = new LobeBedrockAI({
+ region: 'us-west-2',
+ accessKeyId: 'test-access-key-id',
+ accessKeySecret: 'test-access-key-secret',
+ });
+ expect(instance).toBeInstanceOf(LobeBedrockAI);
+ });
+ });
+
+ describe('chat', () => {
+
+ describe('Claude model', () => {
+
+ it('should return a Response on successful API call', async () => {
+ const result = await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'anthropic.claude-v2:1',
+ temperature: 0,
+ });
+
+ // Assert
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ it('should handle text messages correctly', async () => {
+ // Arrange
+ const mockStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Hello, world!');
+ controller.close();
+ },
+ });
+ const mockResponse = Promise.resolve(mockStream);
+ (instance['client'].send as Mock).mockResolvedValue(mockResponse);
+
+ // Act
+ const result = await instance.chat({
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'anthropic.claude-v2:1',
+ temperature: 0,
+ top_p: 1,
+ });
+
+ // Assert
+ expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
+ accept: 'application/json',
+ body: JSON.stringify({
+ anthropic_version: "bedrock-2023-05-31",
+ max_tokens: 4096,
+ messages: [{ content: 'Hello', role: 'user' }],
+ temperature: 0,
+ top_p: 1,
+ }),
+ contentType: 'application/json',
+ modelId: 'anthropic.claude-v2:1',
+ });
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ it('should handle system prompt correctly', async () => {
+ // Arrange
+ const mockStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Hello, world!');
+ controller.close();
+ },
+ });
+ const mockResponse = Promise.resolve(mockStream);
+ (instance['client'].send as Mock).mockResolvedValue(mockResponse);
+
+ // Act
+ const result = await instance.chat({
+ messages: [
+ { content: 'You are an awesome greeter', role: 'system' },
+ { content: 'Hello', role: 'user' },
+ ],
+ model: 'anthropic.claude-v2:1',
+ temperature: 0,
+ top_p: 1,
+ });
+
+ // Assert
+ expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
+ accept: 'application/json',
+ body: JSON.stringify({
+ anthropic_version: "bedrock-2023-05-31",
+ max_tokens: 4096,
+ messages: [{ content: 'Hello', role: 'user' }],
+ system: 'You are an awesome greeter',
+ temperature: 0,
+ top_p: 1,
+ }),
+ contentType: 'application/json',
+ modelId: 'anthropic.claude-v2:1',
+ });
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ it('should call Anthropic model with supported options', async () => {
+ // Arrange
+ const mockStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Hello, world!');
+ controller.close();
+ },
+ });
+ const mockResponse = Promise.resolve(mockStream);
+ (instance['client'].send as Mock).mockResolvedValue(mockResponse);
+
+ // Act
+ const result = await instance.chat({
+ max_tokens: 2048,
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'anthropic.claude-v2:1',
+ temperature: 0.5,
+ top_p: 1,
+ });
+
+ // Assert
+ expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
+ accept: 'application/json',
+ body: JSON.stringify({
+ anthropic_version: "bedrock-2023-05-31",
+ max_tokens: 2048,
+ messages: [{ content: 'Hello', role: 'user' }],
+ temperature: 0.5,
+ top_p: 1,
+ }),
+ contentType: 'application/json',
+ modelId: 'anthropic.claude-v2:1',
+ });
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ it('should call Anthropic model without unsupported options', async () => {
+ // Arrange
+ const mockStream = new ReadableStream({
+ start(controller) {
+ controller.enqueue('Hello, world!');
+ controller.close();
+ },
+ });
+ const mockResponse = Promise.resolve(mockStream);
+ (instance['client'].send as Mock).mockResolvedValue(mockResponse);
+
+ // Act
+ const result = await instance.chat({
+ frequency_penalty: 0.5, // Unsupported option
+ max_tokens: 2048,
+ messages: [{ content: 'Hello', role: 'user' }],
+ model: 'anthropic.claude-v2:1',
+ presence_penalty: 0.5,
+ temperature: 0.5,
+ top_p: 1,
+ });
+
+ // Assert
+ expect(InvokeModelWithResponseStreamCommand).toHaveBeenCalledWith({
+ accept: 'application/json',
+ body: JSON.stringify({
+ anthropic_version: "bedrock-2023-05-31",
+ max_tokens: 2048,
+ messages: [{ content: 'Hello', role: 'user' }],
+ temperature: 0.5,
+ top_p: 1,
+ }),
+ contentType: 'application/json',
+ modelId: 'anthropic.claude-v2:1',
+ });
+ expect(result).toBeInstanceOf(Response);
+ });
+
+ });
+
+ });
+ });
@@ -2,15 +2,23 @@ import {
  BedrockRuntimeClient,
  InvokeModelWithResponseStreamCommand,
  } from '@aws-sdk/client-bedrock-runtime';
- import { AWSBedrockAnthropicStream, AWSBedrockLlama2Stream, StreamingTextResponse } from 'ai';
- import { experimental_buildAnthropicPrompt, experimental_buildLlama2Prompt } from 'ai/prompts';
+ import {
+ AWSBedrockLlama2Stream,
+ AWSBedrockStream,
+ StreamingTextResponse
+ } from 'ai';
+ import { experimental_buildLlama2Prompt } from 'ai/prompts';
  
  import { LobeRuntimeAI } from '../BaseAI';
  import { AgentRuntimeErrorType } from '../error';
- import { ChatStreamPayload, ModelProvider } from '../types';
+ import {
+ ChatCompetitionOptions,
+ ChatStreamPayload,
+ ModelProvider,
+ } from '../types';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';
- import { DEBUG_CHAT_COMPLETION } from '../utils/env';
+ import { buildAnthropicMessages } from '../utils/anthropicHelpers';
  
  export interface LobeBedrockAIParams {
  accessKeyId?: string;
@@ -38,23 +46,32 @@ export class LobeBedrockAI implements LobeRuntimeAI {
  });
  }
  
- async chat(payload: ChatStreamPayload) {
+ async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
  if (payload.model.startsWith('meta')) return this.invokeLlamaModel(payload);
  
- return this.invokeClaudeModel(payload);
+ return this.invokeClaudeModel(payload, options);
  }
  
  private invokeClaudeModel = async (
  payload: ChatStreamPayload,
+ options?: ChatCompetitionOptions
  ): Promise<StreamingTextResponse> => {
+ const { max_tokens, messages, model, temperature, top_p } = payload;
+ const system_message = messages.find((m) => m.role === 'system');
+ const user_messages = messages.filter((m) => m.role !== 'system');
+
  const command = new InvokeModelWithResponseStreamCommand({
  accept: 'application/json',
  body: JSON.stringify({
- max_tokens_to_sample: payload.max_tokens || 400,
- prompt: experimental_buildAnthropicPrompt(payload.messages as any),
+ anthropic_version: "bedrock-2023-05-31",
+ max_tokens: max_tokens || 4096,
+ messages: buildAnthropicMessages(user_messages),
+ system: system_message?.content as string,
+ temperature: temperature,
+ top_p: top_p,
  }),
  contentType: 'application/json',
- modelId: payload.model,
+ modelId: model,
  });
  
  try {
@@ -62,11 +79,11 @@ export class LobeBedrockAI implements LobeRuntimeAI {
  const bedrockResponse = await this.client.send(command);
  
  // Convert the response into a friendly text-stream
- const stream = AWSBedrockAnthropicStream(bedrockResponse);
+ const stream = AWSBedrockStream(bedrockResponse, options?.callback, (chunk) => chunk.delta?.text);
  
  const [debug, output] = stream.tee();
  
- if (DEBUG_CHAT_COMPLETION) {
+ if (process.env.DEBUG_BEDROCK_CHAT_COMPLETION === '1') {
  debugStream(debug).catch(console.error);
  }
  
@@ -88,15 +105,18 @@ export class LobeBedrockAI implements LobeRuntimeAI {
  }
  };
  
- private invokeLlamaModel = async (payload: ChatStreamPayload) => {
+ private invokeLlamaModel = async (
+ payload: ChatStreamPayload
+ ): Promise<StreamingTextResponse> => {
+ const { max_tokens, messages, model } = payload;
  const command = new InvokeModelWithResponseStreamCommand({
  accept: 'application/json',
  body: JSON.stringify({
- max_gen_len: payload.max_tokens || 400,
- prompt: experimental_buildLlama2Prompt(payload.messages as any),
+ max_gen_len: max_tokens || 400,
+ prompt: experimental_buildLlama2Prompt(messages as any),
  }),
  contentType: 'application/json',
- modelId: payload.model,
+ modelId: model,
  });
  
  try {
@@ -108,7 +128,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
  const [debug, output] = stream.tee();
  
- if (DEBUG_CHAT_COMPLETION) {
+ if (process.env.DEBUG_BEDROCK_CHAT_COMPLETION === '1') {
  debugStream(debug).catch(console.error);
  }
  // Respond with the stream
@@ -129,6 +149,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
  });
  }
  };
+
  }
  
  export default LobeBedrockAI;
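
The substantive change on the Claude path is the switch from the legacy text-completions request body (a single flattened `prompt` built with `experimental_buildAnthropicPrompt`, plus `max_tokens_to_sample`) to Bedrock's Anthropic messages format, which is what unlocks Claude 3. Comparing the two body shapes as they appear in this diff, with illustrative values:

```ts
// Before: text-completions body; the conversation is flattened into one string.
const legacyBody = {
  max_tokens_to_sample: 400,
  prompt: '\n\nHuman: Hello\n\nAssistant:', // illustrative prompt-builder output
};

// After: messages body with the required Bedrock version marker; the system
// prompt travels separately from the user/assistant turns.
const messagesBody = {
  anthropic_version: 'bedrock-2023-05-31',
  max_tokens: 4096,
  messages: [{ content: 'Hello', role: 'user' }],
  system: 'You are an awesome greeter',
  temperature: 0.5,
  top_p: 1,
};
```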
@@ -0,0 +1,59 @@
+ import { describe, expect, it, vi } from 'vitest';
+
+ import {
+ buildAnthropicMessage,
+ buildAnthropicBlock,
+ } from './anthropicHelpers';
+
+ import { parseDataUri } from './uriParser';
+ import {
+ OpenAIChatMessage,
+ UserMessageContentPart,
+ } from '../types/chat';
+
+ describe('anthropicHelpers', () => {
+
+ // Mock the parseDataUri function since it's an implementation detail
+ vi.mock('./uriParser', () => ({
+ parseDataUri: vi.fn().mockReturnValue({
+ mimeType: 'image/jpeg',
+ base64: 'base64EncodedString',
+ }),
+ }));
+
+ describe('buildAnthropicBlock', () => {
+ it('should return the content as is for text type', () => {
+ const content: UserMessageContentPart =
+ { type: 'text', text: 'Hello!' };
+ const result = buildAnthropicBlock(content);
+ expect(result).toEqual(content);
+ });
+
+ it('should transform an image URL into an Anthropic.ImageBlockParam', () => {
+ const content: UserMessageContentPart =
+ { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,base64EncodedString' } };
+ const result = buildAnthropicBlock(content);
+ expect(parseDataUri).toHaveBeenCalledWith(content.image_url.url);
+ expect(result).toEqual({
+ source: {
+ data: 'base64EncodedString',
+ media_type: 'image/jpeg',
+ type: 'base64',
+ },
+ type: 'image',
+ });
+ });
+ });
+
+ describe('buildAnthropicMessage', () => {
+ it('should correctly convert system message to assistant message', () => {
+ const message: OpenAIChatMessage =
+ { content: [{ type: 'text', text: 'Hello!' }], role: 'system' };
+ const result = buildAnthropicMessage(message);
+ expect(result).toEqual(
+ { content: [{ type: 'text', text: 'Hello!' }], role: 'assistant' }
+ );
+ });
+ });
+
+ });
@@ -0,0 +1,47 @@
+ import Anthropic from '@anthropic-ai/sdk';
+
+ import {
+ OpenAIChatMessage,
+ UserMessageContentPart,
+ } from '../types';
+
+ import { parseDataUri } from './uriParser';
+
+ export const buildAnthropicBlock = (
+ content: UserMessageContentPart,
+ ): Anthropic.ContentBlock | Anthropic.ImageBlockParam => {
+ switch (content.type) {
+ case 'text': {
+ return content;
+ }
+
+ case 'image_url': {
+ const { mimeType, base64 } = parseDataUri(content.image_url.url);
+
+ return {
+ source: {
+ data: base64 as string,
+ media_type: mimeType as Anthropic.ImageBlockParam.Source['media_type'],
+ type: 'base64',
+ },
+ type: 'image',
+ };
+ }
+ }
+ }
+
+ export const buildAnthropicMessage = (
+ message: OpenAIChatMessage,
+ ): Anthropic.Messages.MessageParam => {
+ const content = message.content as string | UserMessageContentPart[];
+ return {
+ content:
+ typeof content === 'string' ? content : content.map((c) => buildAnthropicBlock(c)),
+ role: message.role === 'function' || message.role === 'system' ? 'assistant' : message.role,
+ };
+ };
+
+ export const buildAnthropicMessages = (
+ messages: OpenAIChatMessage[],
+ ): Anthropic.Messages.MessageParam[] =>
+ messages.map((message) => buildAnthropicMessage(message));
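
As the tests above exercise, the helpers also cover multimodal content: string content passes through unchanged, while `image_url` parts carrying data URIs are decoded by `parseDataUri` into Anthropic base64 image blocks. A short sketch of that path (the truncated base64 payload is illustrative):

```ts
import { buildAnthropicMessage } from './anthropicHelpers';

const converted = buildAnthropicMessage({
  content: [
    { text: 'What is in this image?', type: 'text' },
    { image_url: { url: 'data:image/jpeg;base64,/9j/4AAQSkZJRg==' }, type: 'image_url' },
  ],
  role: 'user',
});
// converted.content[1] ≈ {
//   source: { data: '/9j/4AAQSkZJRg==', media_type: 'image/jpeg', type: 'base64' },
//   type: 'image',
// }
```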