@aigne/anthropic 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,310 @@
+ import { ChatModel, } from "@aigne/core";
+ import { parseJSON } from "@aigne/core/utils/json-schema.js";
+ import { mergeUsage } from "@aigne/core/utils/model-utils.js";
+ import { agentResponseStreamToObject } from "@aigne/core/utils/stream-utils.js";
+ import { checkArguments, isEmpty, isNonNullable, } from "@aigne/core/utils/type-utils.js";
+ import Anthropic from "@anthropic-ai/sdk";
+ import { z } from "zod";
+ const CHAT_MODEL_CLAUDE_DEFAULT_MODEL = "claude-3-7-sonnet-latest";
+ /**
+  * @hidden
+  */
+ export const claudeChatModelOptionsSchema = z.object({
+     apiKey: z.string().optional(),
+     model: z.string().optional(),
+     modelOptions: z
+         .object({
+             model: z.string().optional(),
+             temperature: z.number().optional(),
+             topP: z.number().optional(),
+             frequencyPenalty: z.number().optional(),
+             presencePenalty: z.number().optional(),
+             parallelToolCalls: z.boolean().optional().default(true),
+         })
+         .optional(),
+ });
+ /**
+  * Implementation of the ChatModel interface for Anthropic's Claude API
+  *
+  * This model provides access to Claude's capabilities including:
+  * - Text generation
+  * - Tool use
+  * - JSON structured output
+  *
+  * Default model: 'claude-3-7-sonnet-latest'
+  *
+  * @example
+  * Here's how to create and use a Claude chat model:
+  * {@includeCode ../test/anthropic-chat-model.test.ts#example-anthropic-chat-model}
+  *
+  * @example
+  * Here's an example with streaming response:
+  * {@includeCode ../test/anthropic-chat-model.test.ts#example-anthropic-chat-model-streaming-async-generator}
+  */
+ export class AnthropicChatModel extends ChatModel {
+     options;
+     constructor(options) {
+         if (options)
+             checkArguments("AnthropicChatModel", claudeChatModelOptionsSchema, options);
+         super();
+         this.options = options;
+     }
+     /**
+      * @hidden
+      */
+     _client;
+     get client() {
+         const apiKey = this.options?.apiKey || process.env.ANTHROPIC_API_KEY || process.env.CLAUDE_API_KEY;
+         if (!apiKey)
+             throw new Error("Api Key is required for AnthropicChatModel");
+         this._client ??= new Anthropic({ apiKey });
+         return this._client;
+     }
+     get modelOptions() {
+         return this.options?.modelOptions;
+     }
+     /**
+      * Process the input using Claude's chat model
+      * @param input - The input to process
+      * @returns The processed output from the model
+      */
+     process(input) {
+         return this._process(input);
+     }
+     async _process(input) {
+         const model = this.options?.model || CHAT_MODEL_CLAUDE_DEFAULT_MODEL;
+         const disableParallelToolUse = input.modelOptions?.parallelToolCalls === false ||
+             this.modelOptions?.parallelToolCalls === false;
+         const body = {
+             model,
+             temperature: input.modelOptions?.temperature ?? this.modelOptions?.temperature,
+             top_p: input.modelOptions?.topP ?? this.modelOptions?.topP,
+             // TODO: make dynamic based on model https://docs.anthropic.com/en/docs/about-claude/models/all-models
+             max_tokens: /claude-3-[5|7]/.test(model) ? 8192 : 4096,
+             ...convertMessages(input),
+             ...convertTools({ ...input, disableParallelToolUse }),
+         };
+         const stream = this.client.messages.stream({
+             ...body,
+             stream: true,
+         });
+         if (input.responseFormat?.type !== "json_schema") {
+             return this.extractResultFromAnthropicStream(stream, true);
+         }
+         const result = await this.extractResultFromAnthropicStream(stream);
+         // Claude doesn't support json_schema response and tool calls in the same request,
+         // so we make a separate request for the json_schema response when no tool calls were returned
+         if (!result.toolCalls?.length && input.responseFormat?.type === "json_schema") {
+             const output = await this.requestStructuredOutput(body, input.responseFormat);
+             return {
+                 ...output,
+                 // merge usage from both requests
+                 usage: mergeUsage(result.usage, output.usage),
+             };
+         }
+         return result;
+     }
+     async extractResultFromAnthropicStream(stream, streaming) {
+         const logs = [];
+         const result = new ReadableStream({
+             async start(controller) {
+                 try {
+                     const toolCalls = [];
+                     let usage;
+                     let model;
+                     for await (const chunk of stream) {
+                         if (chunk.type === "message_start") {
+                             if (!model) {
+                                 model = chunk.message.model;
+                                 controller.enqueue({ delta: { json: { model } } });
+                             }
+                             const { input_tokens, output_tokens } = chunk.message.usage;
+                             usage = {
+                                 inputTokens: input_tokens,
+                                 outputTokens: output_tokens,
+                             };
+                         }
+                         if (chunk.type === "message_delta" && usage) {
+                             usage.outputTokens = chunk.usage.output_tokens;
+                         }
+                         logs.push(JSON.stringify(chunk));
+                         // handle streaming text
+                         if (chunk.type === "content_block_delta" && chunk.delta.type === "text_delta") {
+                             controller.enqueue({ delta: { text: { text: chunk.delta.text } } });
+                         }
+                         if (chunk.type === "content_block_start" && chunk.content_block.type === "tool_use") {
+                             toolCalls[chunk.index] = {
+                                 type: "function",
+                                 id: chunk.content_block.id,
+                                 function: {
+                                     name: chunk.content_block.name,
+                                     arguments: {},
+                                 },
+                                 args: "",
+                             };
+                         }
+                         if (chunk.type === "content_block_delta" && chunk.delta.type === "input_json_delta") {
+                             const call = toolCalls[chunk.index];
+                             if (!call)
+                                 throw new Error("Tool call not found");
+                             call.args += chunk.delta.partial_json;
+                         }
+                     }
+                     controller.enqueue({ delta: { json: { usage } } });
+                     if (toolCalls.length) {
+                         controller.enqueue({
+                             delta: {
+                                 json: {
+                                     toolCalls: toolCalls
+                                         .map(({ args, ...c }) => ({
+                                             ...c,
+                                             function: {
+                                                 ...c.function,
+                                                 // NOTE: Claude may return a blank string when the tool's input schema is an empty object
+                                                 arguments: args.trim() ? parseJSON(args) : {},
+                                             },
+                                         }))
+                                         .filter(isNonNullable),
+                                 },
+                             },
+                         });
+                     }
+                     controller.close();
+                 }
+                 catch (error) {
+                     controller.error(error);
+                 }
+             },
+         });
+         return streaming ? result : await agentResponseStreamToObject(result);
+     }
+     async requestStructuredOutput(body, responseFormat) {
+         if (responseFormat?.type !== "json_schema") {
+             throw new Error("Expected json_schema response format");
+         }
+         const result = await this.client.messages.create({
+             ...body,
+             tools: [
+                 {
+                     name: "generate_json",
+                     description: "Generate a json result by given context",
+                     input_schema: responseFormat.jsonSchema.schema,
+                 },
+             ],
+             tool_choice: {
+                 type: "tool",
+                 name: "generate_json",
+                 disable_parallel_tool_use: true,
+             },
+             stream: false,
+         });
+         const jsonTool = result.content.find((i) => i.type === "tool_use" && i.name === "generate_json");
+         if (!jsonTool)
+             throw new Error("Json tool not found");
+         return {
+             json: jsonTool.input,
+             model: result.model,
+             usage: {
+                 inputTokens: result.usage.input_tokens,
+                 outputTokens: result.usage.output_tokens,
+             },
+         };
+     }
+ }
+ function convertMessages({ messages, responseFormat }) {
+     const systemMessages = [];
+     const msgs = [];
+     for (const msg of messages) {
+         if (msg.role === "system") {
+             if (typeof msg.content !== "string")
+                 throw new Error("System message must have content");
+             systemMessages.push(msg.content);
+         }
+         else if (msg.role === "tool") {
+             if (!msg.toolCallId)
+                 throw new Error("Tool message must have toolCallId");
+             if (typeof msg.content !== "string")
+                 throw new Error("Tool message must have string content");
+             msgs.push({
+                 role: "user",
+                 content: [{ type: "tool_result", tool_use_id: msg.toolCallId, content: msg.content }],
+             });
+         }
+         else if (msg.role === "user") {
+             if (!msg.content)
+                 throw new Error("User message must have content");
+             msgs.push({ role: "user", content: convertContent(msg.content) });
+         }
+         else if (msg.role === "agent") {
+             if (msg.toolCalls?.length) {
+                 msgs.push({
+                     role: "assistant",
+                     content: msg.toolCalls.map((i) => ({
+                         type: "tool_use",
+                         id: i.id,
+                         name: i.function.name,
+                         input: i.function.arguments,
+                     })),
+                 });
+             }
+             else if (msg.content) {
+                 msgs.push({ role: "assistant", content: convertContent(msg.content) });
+             }
+             else {
+                 throw new Error("Agent message must have content or toolCalls");
+             }
+         }
+     }
+     if (responseFormat?.type === "json_schema") {
+         systemMessages.push(`You should provide a json response with schema: ${JSON.stringify(responseFormat.jsonSchema.schema)}`);
+     }
+     const system = systemMessages.join("\n").trim() || undefined;
+     // Claude requires at least one message, so we fall back to sending the system prompt as a user message when there are none
+     if (msgs.length === 0) {
+         if (!system)
+             throw new Error("No messages provided");
+         return { messages: [{ role: "user", content: system }] };
+     }
+     return { messages: msgs, system };
+ }
+ function convertContent(content) {
+     if (typeof content === "string")
+         return content;
+     if (Array.isArray(content)) {
+         return content.map((item) => item.type === "image_url"
+             ? { type: "image", source: { type: "url", url: item.url } }
+             : { type: "text", text: item.text });
+     }
+     throw new Error("Invalid chat message content");
+ }
+ function convertTools({ tools, toolChoice, disableParallelToolUse, }) {
+     let choice;
+     if (typeof toolChoice === "object" && "type" in toolChoice && toolChoice.type === "function") {
+         choice = {
+             type: "tool",
+             name: toolChoice.function.name,
+             disable_parallel_tool_use: disableParallelToolUse,
+         };
+     }
+     else if (toolChoice === "required") {
+         choice = { type: "any", disable_parallel_tool_use: disableParallelToolUse };
+     }
+     else if (toolChoice === "auto") {
+         choice = { type: "auto", disable_parallel_tool_use: disableParallelToolUse };
+     }
+     else if (toolChoice === "none") {
+         choice = { type: "none" };
+     }
+     return {
+         tools: tools?.length
+             ? tools.map((i) => ({
+                 name: i.function.name,
+                 description: i.function.description,
+                 input_schema: isEmpty(i.function.parameters)
+                     ? { type: "object" }
+                     : i.function.parameters,
+             }))
+             : undefined,
+         tool_choice: choice,
+     };
+ }
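
The two @example blocks in the class doc above are pulled in via @includeCode from test files that are not part of this diff. As a rough orientation only, here is a hypothetical sketch that drives the compiled class directly; it relies solely on the constructor options, the process() signature, and the responseFormat.jsonSchema.schema shape visible in the source, and the result field names (text, json, usage) are assumptions inferred from the emitted stream deltas and the structured-output return value.

```js
// Hypothetical usage sketch based on the compiled source above; not taken from
// the package's own tests (which this diff does not include).
import { AnthropicChatModel } from "@aigne/anthropic";
import { agentResponseStreamToObject } from "@aigne/core/utils/stream-utils.js";

const model = new AnthropicChatModel({
  apiKey: process.env.ANTHROPIC_API_KEY, // optional: the client also falls back to ANTHROPIC_API_KEY / CLAUDE_API_KEY
  model: "claude-3-7-sonnet-latest",     // the package default
  modelOptions: { temperature: 0.2 },
});

// Without a json_schema responseFormat, process() resolves to a stream of
// { delta: ... } chunks; agentResponseStreamToObject (the same helper the
// package imports internally) collapses it into a single result object.
const stream = await model.process({
  messages: [{ role: "user", content: "Say hello in one sentence." }],
});
const reply = await agentResponseStreamToObject(stream);
console.log(reply.text, reply.usage); // field names assumed from the stream deltas

// With a json_schema responseFormat, process() resolves directly to an object
// whose json field comes from the internal "generate_json" tool request.
const structured = await model.process({
  messages: [{ role: "user", content: "Name a city and its country." }],
  responseFormat: {
    type: "json_schema",
    jsonSchema: {
      schema: {
        type: "object",
        properties: { city: { type: "string" }, country: { type: "string" } },
        required: ["city", "country"],
      },
    },
  },
});
console.log(structured.json, structured.usage);
```

Per the comment in _process, a json_schema request that also returns tool calls skips the second structured-output request; otherwise the usage of both requests is merged with mergeUsage.
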
@@ -0,0 +1 @@
+ export * from "./anthropic-chat-model.js";
@@ -0,0 +1 @@
+ export * from "./anthropic-chat-model.js";
@@ -0,0 +1 @@
+ {"type": "module"}
package/package.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "name": "@aigne/anthropic",
+   "version": "0.1.0",
+   "description": "AIGNE Anthropic SDK for integrating with Claude AI models",
+   "publishConfig": {
+     "access": "public"
+   },
+   "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
+   "homepage": "https://github.com/AIGNE-io/aigne-framework",
+   "license": "Elastic-2.0",
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/AIGNE-io/aigne-framework"
+   },
+   "files": [
+     "lib/cjs",
+     "lib/dts",
+     "lib/esm",
+     "LICENSE",
+     "README.md",
+     "CHANGELOG.md"
+   ],
+   "type": "module",
+   "main": "./lib/cjs/index.js",
+   "module": "./lib/esm/index.js",
+   "types": "./lib/dts/index.d.ts",
+   "exports": {
+     ".": {
+       "import": "./lib/esm/index.js",
+       "require": "./lib/cjs/index.js",
+       "types": "./lib/dts/index.d.ts"
+     }
+   },
+   "dependencies": {
+     "@anthropic-ai/sdk": "^0.41.0",
+     "zod": "^3.24.4",
+     "@aigne/core": "^1.16.0"
+   },
+   "devDependencies": {
+     "@types/bun": "^1.2.12",
+     "@types/node": "^22.15.15",
+     "npm-run-all": "^4.1.5",
+     "rimraf": "^6.0.1",
+     "typescript": "^5.8.3",
+     "@aigne/test-utils": "^0.3.0"
+   },
+   "scripts": {
+     "lint": "tsc --noEmit",
+     "build": "tsc --build scripts/tsconfig.build.json",
+     "clean": "rimraf lib test/coverage",
+     "test": "bun test",
+     "test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text",
+     "postbuild": "echo '{\"type\": \"module\"}' > lib/esm/package.json && echo '{\"type\": \"commonjs\"}' > lib/cjs/package.json"
+   }
+ }
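
The "exports" map, together with the postbuild script that writes {"type": "module"} into lib/esm and {"type": "commonjs"} into lib/cjs, is a standard dual-package setup. A minimal sketch of what consumers would write, assuming the published lib/ layout matches this configuration:

```js
// ESM consumers resolve through the "import" condition to ./lib/esm/index.js
import { AnthropicChatModel } from "@aigne/anthropic";

// CommonJS consumers resolve through the "require" condition to ./lib/cjs/index.js, e.g.:
//   const { AnthropicChatModel } = require("@aigne/anthropic");
```
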