@ddlqhd/agent-sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -0
- package/dist/chunk-5QMA2YBY.cjs +2880 -0
- package/dist/chunk-5QMA2YBY.cjs.map +1 -0
- package/dist/chunk-5Y56A64C.cjs +5 -0
- package/dist/chunk-5Y56A64C.cjs.map +1 -0
- package/dist/chunk-A3S3AGE3.js +3 -0
- package/dist/chunk-A3S3AGE3.js.map +1 -0
- package/dist/chunk-CNSGZVRN.cjs +152 -0
- package/dist/chunk-CNSGZVRN.cjs.map +1 -0
- package/dist/chunk-JF5AJQMU.cjs +2788 -0
- package/dist/chunk-JF5AJQMU.cjs.map +1 -0
- package/dist/chunk-NDSL7NPN.js +807 -0
- package/dist/chunk-NDSL7NPN.js.map +1 -0
- package/dist/chunk-OHXW2YM6.js +2708 -0
- package/dist/chunk-OHXW2YM6.js.map +1 -0
- package/dist/chunk-Q3SOMX26.js +2854 -0
- package/dist/chunk-Q3SOMX26.js.map +1 -0
- package/dist/chunk-WH3APNQ5.js +147 -0
- package/dist/chunk-WH3APNQ5.js.map +1 -0
- package/dist/chunk-X35MHWXE.cjs +817 -0
- package/dist/chunk-X35MHWXE.cjs.map +1 -0
- package/dist/cli/index.cjs +926 -0
- package/dist/cli/index.cjs.map +1 -0
- package/dist/cli/index.d.cts +24 -0
- package/dist/cli/index.d.ts +24 -0
- package/dist/cli/index.js +916 -0
- package/dist/cli/index.js.map +1 -0
- package/dist/index-DPsZ1zat.d.ts +447 -0
- package/dist/index-RTPmFjMp.d.cts +447 -0
- package/dist/index.cjs +508 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +664 -0
- package/dist/index.d.ts +664 -0
- package/dist/index.js +204 -0
- package/dist/index.js.map +1 -0
- package/dist/models/index.cjs +62 -0
- package/dist/models/index.cjs.map +1 -0
- package/dist/models/index.d.cts +165 -0
- package/dist/models/index.d.ts +165 -0
- package/dist/models/index.js +5 -0
- package/dist/models/index.js.map +1 -0
- package/dist/tools/index.cjs +207 -0
- package/dist/tools/index.cjs.map +1 -0
- package/dist/tools/index.d.cts +108 -0
- package/dist/tools/index.d.ts +108 -0
- package/dist/tools/index.js +6 -0
- package/dist/tools/index.js.map +1 -0
- package/dist/types-C0aX_Qdp.d.cts +917 -0
- package/dist/types-C0aX_Qdp.d.ts +917 -0
- package/package.json +80 -0
|
@@ -0,0 +1,807 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { BaseModelAdapter, toolsToModelSchema } from './chunk-WH3APNQ5.js';
|
|
3
|
+
|
|
4
|
+
// src/models/openai.ts
|
|
5
|
+
// Known context-window and max-output-token limits, keyed by OpenAI model id.
// Consulted by OpenAIAdapter as a fallback when the caller does not pass an
// explicit `capabilities` object in its config.
var OPENAI_CAPABILITIES = {
  "gpt-4o": { contextLength: 128e3, maxOutputTokens: 16384 },
  "gpt-4o-mini": { contextLength: 128e3, maxOutputTokens: 16384 },
  "gpt-4-turbo": { contextLength: 128e3, maxOutputTokens: 4096 },
  "gpt-4": { contextLength: 8192, maxOutputTokens: 4096 },
  "gpt-3.5-turbo": { contextLength: 16385, maxOutputTokens: 4096 }
};
|
|
12
|
+
// Chat-completions adapter for the OpenAI API (or any OpenAI-compatible
// endpoint selected via `baseUrl`). Supports SSE streaming and blocking
// completion, tool calls, and usage metadata.
var OpenAIAdapter = class extends BaseModelAdapter {
  // Adapter display name, e.g. "openai/gpt-4o".
  name;
  // API key; resolved from config, else the OPENAI_API_KEY env var.
  apiKey;
  // API root; defaults to the public OpenAI endpoint.
  baseUrl;
  // Model id sent with every request.
  model;
  // Optional organization id, sent as the OpenAI-Organization header.
  organization;
  /**
   * @param {object} [config] - { apiKey, baseUrl, model, organization, capabilities }.
   * @throws {Error} when no API key is available from config or the environment.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
    this.model = config.model || "gpt-4o";
    this.organization = config.organization || process.env.OPENAI_ORG_ID;
    if (!this.apiKey) {
      throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `openai/${this.model}`;
    // Explicit config wins, then the known-model table, then a generic default.
    this.capabilities = config.capabilities ?? OPENAI_CAPABILITIES[this.model] ?? { contextLength: 128e3, maxOutputTokens: 4096 };
  }
  /**
   * Stream a chat completion as an async generator of normalized chunks:
   * "text", "tool_call_start", "tool_call_delta", "tool_call", "metadata", "done".
   * Streamed tool-call argument fragments are accumulated locally and emitted
   * as one complete "tool_call" chunk when the call finishes.
   * @param {object} params - { messages, tools, temperature, maxTokens,
   *   stopSequences, signal, includeRawStreamEvents }.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/chat/completions", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Tool call currently being assembled from streamed argument fragments.
    let currentToolCall = null;
    try {
      while (true) {
        // Cooperative cancellation: stop reading once the caller aborts.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep any trailing partial line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          // Skip blank keep-alive lines and the SSE terminator sentinel.
          if (!trimmed || trimmed === "data: [DONE]") continue;
          if (!trimmed.startsWith("data: ")) continue;
          try {
            const data = JSON.parse(trimmed.slice(6));
            const choice = data.choices?.[0];
            if (!choice) continue;
            // Optionally attach the raw provider event to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            if (choice.delta?.content) {
              yield { type: "text", content: choice.delta.content, ...raw };
            }
            if (choice.delta?.tool_calls) {
              for (const toolCall of choice.delta.tool_calls) {
                if (toolCall.index !== void 0) {
                  // A fragment with id + name starts a new tool call.
                  if (toolCall.id && toolCall.function?.name) {
                    // Flush the previous call before starting the next one.
                    if (currentToolCall) {
                      yield {
                        type: "tool_call",
                        toolCall: {
                          id: currentToolCall.id,
                          name: currentToolCall.name,
                          arguments: this.safeParseJSON(currentToolCall.arguments)
                        },
                        ...raw
                      };
                    }
                    currentToolCall = {
                      id: toolCall.id,
                      name: toolCall.function.name,
                      arguments: toolCall.function.arguments || ""
                    };
                    // NOTE(review): here "tool_call_start" carries `content`/`toolCallId`,
                    // while the Anthropic adapter emits a `toolCall` object — confirm
                    // consumers handle both shapes.
                    yield {
                      type: "tool_call_start",
                      content: toolCall.function.name,
                      toolCallId: toolCall.id,
                      ...raw
                    };
                  } else if (toolCall.function?.arguments && currentToolCall) {
                    // Continuation fragment: append raw JSON text to the buffer.
                    currentToolCall.arguments += toolCall.function.arguments;
                    yield {
                      type: "tool_call_delta",
                      content: toolCall.function.arguments,
                      toolCallId: currentToolCall.id,
                      ...raw
                    };
                  }
                }
              }
            }
            // The provider signals tool-call completion via finish_reason.
            if (choice.finish_reason === "tool_calls" && currentToolCall) {
              yield {
                type: "tool_call",
                toolCall: {
                  id: currentToolCall.id,
                  name: currentToolCall.name,
                  arguments: this.safeParseJSON(currentToolCall.arguments)
                },
                ...raw
              };
              currentToolCall = null;
            }
            // Usage arrives in a trailing event (stream_options.include_usage).
            if (data.usage) {
              yield {
                type: "metadata",
                usagePhase: "output",
                metadata: {
                  usage: {
                    promptTokens: data.usage.prompt_tokens,
                    completionTokens: data.usage.completion_tokens,
                    totalTokens: data.usage.total_tokens
                  }
                },
                ...raw
              };
            }
          } catch {
          // Best-effort SSE parse: silently skip malformed/partial JSON lines.
          }
        }
      }
      // Flush a tool call that never saw a finish_reason (stream cut short).
      if (currentToolCall) {
        yield {
          type: "tool_call",
          toolCall: {
            id: currentToolCall.id,
            name: currentToolCall.name,
            arguments: this.safeParseJSON(currentToolCall.arguments)
          },
          ...params.includeRawStreamEvents ? { providerRaw: { trailing: true } } : {}
        };
      }
      yield { type: "done" };
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Blocking (non-streaming) chat completion.
   * @param {object} params - same shape as stream() (signal unused here).
   * @returns {Promise<object>} { content, toolCalls?, usage? }.
   * @throws {Error} on a non-2xx HTTP response or an empty choices array.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/chat/completions", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const choice = data.choices?.[0];
    if (!choice) {
      throw new Error("No completion choice returned");
    }
    const result = {
      content: choice.message?.content || ""
    };
    if (choice.message?.tool_calls) {
      result.toolCalls = choice.message.tool_calls.map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        arguments: this.safeParseJSON(tc.function.arguments)
      }));
    }
    if (data.usage) {
      result.usage = {
        promptTokens: data.usage.prompt_tokens,
        completionTokens: data.usage.completion_tokens,
        totalTokens: data.usage.total_tokens
      };
    }
    return result;
  }
  // Map normalized params onto the OpenAI /chat/completions request schema.
  // Optional fields are only included when set, so provider defaults apply.
  buildRequestBody(params, stream) {
    const messages = this.transformMessages(params.messages);
    const body = {
      model: this.model,
      messages,
      stream,
      // Ask for a trailing usage event when streaming.
      ...stream && { stream_options: { include_usage: true } },
      ...params.temperature !== void 0 && { temperature: params.temperature },
      ...params.maxTokens !== void 0 && { max_tokens: params.maxTokens },
      ...params.stopSequences && { stop: params.stopSequences }
    };
    if (params.tools && params.tools.length > 0) {
      body.tools = toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  // POST JSON to `${baseUrl}${path}` with auth (and optional org) headers.
  // Uses globalThis.fetch; `signal` enables caller-side cancellation.
  async fetch(path, body, signal) {
    const headers = {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.apiKey}`
    };
    if (this.organization) {
      headers["OpenAI-Organization"] = this.organization;
    }
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers,
      body: JSON.stringify(body),
      signal
    });
  }
  // Parse a JSON string; on failure return the original string unchanged
  // (lets consumers see partial/invalid tool arguments instead of losing them).
  safeParseJSON(str) {
    try {
      return JSON.parse(str);
    } catch {
      return str;
    }
  }
};
|
|
224
|
+
/**
 * Factory: build an OpenAIAdapter from the given config.
 * @param {object} [config] - forwarded verbatim to the OpenAIAdapter constructor.
 * @returns {OpenAIAdapter}
 */
function createOpenAI(config) {
  const adapter = new OpenAIAdapter(config);
  return adapter;
}
|
|
227
|
+
|
|
228
|
+
// src/models/anthropic.ts
|
|
229
|
+
// Known context-window and max-output-token limits, keyed by Anthropic model id.
// Consulted by AnthropicAdapter as a fallback when the caller does not pass an
// explicit `capabilities` object in its config.
var ANTHROPIC_CAPABILITIES = {
  "claude-sonnet-4-20250514": { contextLength: 2e5, maxOutputTokens: 16384 },
  "claude-haiku": { contextLength: 2e5, maxOutputTokens: 8192 },
  "claude-3-5-sonnet-20241022": { contextLength: 2e5, maxOutputTokens: 8192 },
  "claude-3-haiku-20240307": { contextLength: 2e5, maxOutputTokens: 4096 }
};
|
|
235
|
+
// Messages-API adapter for Anthropic Claude models. Supports SSE streaming
// (including thinking blocks and tool use), blocking completion, and usage
// metadata with prompt-cache accounting.
var AnthropicAdapter = class extends BaseModelAdapter {
  // Adapter display name, e.g. "anthropic/claude-sonnet-4-20250514".
  name;
  // API key; resolved from config, else the ANTHROPIC_API_KEY env var.
  apiKey;
  // API root; defaults to the public Anthropic endpoint.
  baseUrl;
  // Model id sent with every request.
  model;
  // Value of the required `anthropic-version` header.
  version;
  /**
   * @param {object} [config] - { apiKey, baseUrl, model, version, capabilities }.
   * @throws {Error} when no API key is available from config or the environment.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com";
    this.model = config.model || "claude-sonnet-4-20250514";
    this.version = config.version || "2023-06-01";
    if (!this.apiKey) {
      throw new Error("Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `anthropic/${this.model}`;
    // Explicit config wins, then the known-model table, then a generic default.
    this.capabilities = config.capabilities ?? ANTHROPIC_CAPABILITIES[this.model] ?? { contextLength: 2e5, maxOutputTokens: 4096 };
  }
  /**
   * Stream a message as an async generator of normalized chunks:
   * "text", "thinking", "tool_call_start", "tool_call_delta", "tool_call",
   * "metadata", "done". Anthropic's typed SSE events are translated here;
   * tool-use input JSON is accumulated and emitted as one "tool_call" chunk
   * when its content block stops.
   * @param {object} params - { messages, tools, temperature, maxTokens,
   *   signal, includeRawStreamEvents }.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/v1/messages", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Tool-use block currently being assembled from input_json_delta events.
    let currentToolCall = null;
    // Active thinking block; keeps the signature to attach to its deltas.
    let currentThinkingBlock = null;
    try {
      while (true) {
        // Cooperative cancellation: stop reading once the caller aborts.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep any trailing partial line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed || !trimmed.startsWith("data:")) continue;
          // Accept both "data:" and "data: " prefixes.
          let jsonStart = 5;
          if (trimmed.length > 5 && trimmed[5] === " ") {
            jsonStart = 6;
          }
          const jsonStr = trimmed.slice(jsonStart);
          try {
            const data = JSON.parse(jsonStr);
            // Optionally attach the raw provider event to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            switch (data.type) {
              case "content_block_start":
                if (data.content_block?.type === "tool_use") {
                  currentToolCall = {
                    id: data.content_block.id,
                    name: data.content_block.name,
                    input: ""
                  };
                  yield {
                    type: "tool_call_start",
                    toolCall: {
                      id: data.content_block.id,
                      name: data.content_block.name,
                      arguments: {}
                    },
                    ...raw
                  };
                } else if (data.content_block?.type === "thinking") {
                  currentThinkingBlock = {
                    signature: data.content_block.signature
                  };
                  yield {
                    type: "thinking",
                    content: data.content_block.thinking,
                    signature: currentThinkingBlock.signature,
                    ...raw
                  };
                }
                break;
              case "content_block_delta":
                if (data.delta?.type === "text_delta") {
                  yield { type: "text", content: data.delta.text, ...raw };
                } else if (data.delta?.type === "thinking_delta") {
                  yield {
                    type: "thinking",
                    content: data.delta.thinking,
                    signature: currentThinkingBlock?.signature,
                    ...raw
                  };
                } else if (data.delta?.type === "input_json_delta" && currentToolCall) {
                  // Accumulate raw JSON text; parsed once the block stops.
                  currentToolCall.input += data.delta.partial_json;
                  yield {
                    type: "tool_call_delta",
                    content: data.delta.partial_json,
                    toolCallId: currentToolCall.id,
                    ...raw
                  };
                }
                break;
              case "content_block_stop":
                // Close out whichever block (tool use / thinking) was active.
                if (currentToolCall) {
                  yield {
                    type: "tool_call",
                    toolCall: {
                      id: currentToolCall.id,
                      name: currentToolCall.name,
                      arguments: this.safeParseJSON(currentToolCall.input)
                    },
                    ...raw
                  };
                  currentToolCall = null;
                }
                if (currentThinkingBlock) {
                  currentThinkingBlock = null;
                }
                break;
              case "message_start":
                // Input-side usage arrives up front with the message envelope.
                if (data.message?.usage) {
                  const usage = data.message.usage;
                  // Cache-read tokens are billed/read separately; fold them in.
                  const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
                  yield {
                    type: "metadata",
                    usagePhase: "input",
                    metadata: {
                      usage: {
                        promptTokens: actualInputTokens,
                        completionTokens: 0,
                        totalTokens: actualInputTokens,
                        // Forward cache accounting info
                        cacheReadTokens: usage.cache_read_input_tokens || 0,
                        cacheWriteTokens: usage.cache_creation_input_tokens || 0
                      }
                    },
                    ...raw
                  };
                }
                break;
              case "message_delta":
                // Output-side usage arrives incrementally near the end.
                if (data.usage) {
                  yield {
                    type: "metadata",
                    usagePhase: "output",
                    metadata: {
                      usage: {
                        promptTokens: 0,
                        completionTokens: data.usage.output_tokens,
                        totalTokens: data.usage.output_tokens
                      }
                    },
                    ...raw
                  };
                }
                break;
            }
          } catch {
          // Best-effort SSE parse: silently skip malformed/partial JSON lines.
          }
        }
      }
      yield { type: "done" };
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Blocking (non-streaming) message completion. Text blocks are concatenated
   * into `content`; tool_use blocks become `toolCalls`.
   * @param {object} params - same shape as stream() (signal unused here).
   * @returns {Promise<object>} { content, toolCalls?, usage? }.
   * @throws {Error} on a non-2xx HTTP response.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/v1/messages", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const result = {
      content: ""
    };
    const toolCalls = [];
    for (const block of data.content || []) {
      if (block.type === "text") {
        result.content += block.text;
      } else if (block.type === "tool_use") {
        toolCalls.push({
          id: block.id,
          name: block.name,
          arguments: block.input
        });
      }
    }
    if (toolCalls.length > 0) {
      result.toolCalls = toolCalls;
    }
    if (data.usage) {
      const usage = data.usage;
      // Cache-read tokens count toward the effective prompt size.
      const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
      result.usage = {
        promptTokens: actualInputTokens,
        completionTokens: usage.output_tokens,
        totalTokens: actualInputTokens + usage.output_tokens
      };
    }
    return result;
  }
  // Map normalized params onto the Anthropic /v1/messages request schema.
  // System messages are lifted into the top-level `system` field, and
  // max_tokens is mandatory for this API (defaulted to 4096).
  buildRequestBody(params, stream) {
    const { system, messages } = this.extractSystemMessage(params.messages);
    const transformedMessages = this.transformAnthropicMessages(messages);
    const body = {
      model: this.model,
      max_tokens: params.maxTokens || 4096,
      messages: transformedMessages,
      stream,
      ...system && { system },
      ...params.temperature !== void 0 && { temperature: params.temperature }
    };
    if (params.tools && params.tools.length > 0) {
      body.tools = toolsToModelSchema(params.tools).map((tool) => ({
        name: tool.name,
        description: tool.description,
        input_schema: tool.parameters
      }));
    }
    return body;
  }
  // Split system-role messages out of the conversation: Anthropic takes them
  // as a single top-level `system` string, not as chat messages. Multiple
  // system messages are joined with blank lines, preserving order.
  extractSystemMessage(messages) {
    const systemMessages = messages.filter((m) => m.role === "system");
    const otherMessages = messages.filter((m) => m.role !== "system");
    const combinedSystem = systemMessages.length > 0 ? systemMessages.map((m) => m.content).join("\n\n") : void 0;
    return {
      system: combinedSystem,
      messages: otherMessages
    };
  }
  // Convert normalized messages into Anthropic content-block form:
  // - string content -> single text block; array content passed through
  //   (thinking and unknown part types are forwarded as-is);
  // - assistant toolCalls -> appended tool_use blocks;
  // - role "tool" -> user message wrapping a tool_result block.
  transformAnthropicMessages(messages) {
    return messages.map((msg) => {
      const transformed = {
        // Any non-assistant role (user/tool/other) maps to "user" here;
        // the tool case is specialized further below.
        role: msg.role === "assistant" ? "assistant" : "user",
        content: []
      };
      if (typeof msg.content === "string") {
        transformed.content = [{ type: "text", text: msg.content }];
      } else if (Array.isArray(msg.content)) {
        const contentParts = [];
        for (const part of msg.content) {
          if (part.type === "thinking") {
            contentParts.push(part);
          } else if (part.type === "text") {
            contentParts.push({ type: "text", text: part.text });
          } else {
            contentParts.push(part);
          }
        }
        transformed.content = contentParts;
        // An empty block array is invalid; fall back to an empty string.
        if (contentParts.length === 0) {
          transformed.content = "";
        }
      }
      if (msg.toolCalls && msg.role === "assistant") {
        for (const tc of msg.toolCalls) {
          transformed.content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.name,
            input: tc.arguments
          });
        }
      }
      if (msg.role === "tool" && msg.toolCallId) {
        transformed.role = "user";
        transformed.content = [{
          type: "tool_result",
          tool_use_id: msg.toolCallId,
          content: msg.content
        }];
      }
      return transformed;
    });
  }
  // POST JSON to `${baseUrl}${path}` with Anthropic auth/version headers.
  // Uses globalThis.fetch; `signal` enables caller-side cancellation.
  async fetch(path, body, signal) {
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": this.apiKey,
        "anthropic-version": this.version
      },
      body: JSON.stringify(body),
      signal
    });
  }
  // Parse a JSON string; on failure return the original string unchanged
  // (lets consumers see partial/invalid tool input instead of losing it).
  safeParseJSON(str) {
    try {
      return JSON.parse(str);
    } catch {
      return str;
    }
  }
};
|
|
534
|
+
/**
 * Factory: build an AnthropicAdapter from the given config.
 * @param {object} [config] - forwarded verbatim to the AnthropicAdapter constructor.
 * @returns {AnthropicAdapter}
 */
function createAnthropic(config) {
  const adapter = new AnthropicAdapter(config);
  return adapter;
}
|
|
537
|
+
|
|
538
|
+
// src/models/ollama.ts
|
|
539
|
+
// Known context-window and max-output-token limits, keyed by Ollama model tag.
// Consulted by OllamaAdapter as a fallback when the caller does not pass an
// explicit `capabilities` object in its config.
var OLLAMA_CAPABILITIES = {
  "qwen3.5:0.8b": { contextLength: 32768, maxOutputTokens: 4096 },
  "minimax-m2.7:cloud": { contextLength: 128e3, maxOutputTokens: 16384 },
  "nemotron-3-super:cloud": { contextLength: 128e3, maxOutputTokens: 16384 },
  "glm-5:cloud": { contextLength: 128e3, maxOutputTokens: 16384 }
};
|
|
545
|
+
/**
 * Translate one parsed Ollama /api/chat NDJSON record into normalized stream
 * chunks ("thinking", "text", "tool_call"), in that fixed order.
 * @param {object} data - one decoded line from the Ollama chat stream.
 * @param {(args: unknown) => object} parseToolArguments - normalizes tool-call arguments.
 * @param {() => string} nextToolCallId - supplies an id for each emitted tool call
 *   (Ollama tool calls carry no ids of their own).
 * @returns {Array<object>} zero or more chunk objects; empty when `data.message` is absent.
 */
function ollamaStreamChunksFromChatData(data, parseToolArguments, nextToolCallId) {
  const message = data.message;
  if (!message) return [];
  const out = [];
  if (typeof message.thinking === "string" && message.thinking.length > 0) {
    out.push({ type: "thinking", content: message.thinking });
  }
  if (typeof message.content === "string" && message.content.length > 0) {
    out.push({ type: "text", content: message.content });
  }
  if (Array.isArray(message.tool_calls)) {
    for (const call of message.tool_calls) {
      const fn = call.function;
      out.push({
        type: "tool_call",
        toolCall: {
          id: nextToolCallId(),
          // Non-string names collapse to the empty string.
          name: typeof fn?.name === "string" ? fn.name : "",
          arguments: parseToolArguments(fn?.arguments)
        }
      });
    }
  }
  return out;
}
|
|
574
|
+
/**
 * Flatten a normalized message `content` value into the plain string the
 * Ollama chat API expects.
 * Strings pass through; arrays keep only their text parts, joined with blank
 * lines; anything else becomes the empty string.
 * @param {string|Array<object>|*} content
 * @returns {string}
 */
function ollamaMessageContentToApiString(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
  return content
    .filter((part) => part.type === "text")
    .map((part) => part.text)
    .join("\n\n");
}
|
|
585
|
+
/**
 * Build a collision-resistant id for an Ollama tool call (the API supplies
 * none): "ollama_<batchMs>_<index>_<random base36 suffix>".
 * @param {number} batchMs - timestamp shared by one batch of tool calls.
 * @param {number} index - position of the call within the batch.
 * @returns {string}
 */
function uniqueOllamaToolCallId(batchMs, index) {
  const suffix = Math.random().toString(36).slice(2, 11);
  return ["ollama", batchMs, index, suffix].join("_");
}
|
|
588
|
+
// Adapter for a local or cloud Ollama server (/api/chat). Streams NDJSON
// (one JSON object per line, not SSE) and supports tool calls, thinking
// output, and usage metadata. No API key is required.
var OllamaAdapter = class extends BaseModelAdapter {
  // Adapter display name, e.g. "ollama/qwen3.5:0.8b".
  name;
  // Server root; defaults to the local Ollama daemon.
  baseUrl;
  // Model tag sent with every request.
  model;
  // Optional "think" flag forwarded to the server when defined.
  think;
  /**
   * @param {object} [config] - { baseUrl, model, think, capabilities }.
   */
  constructor(config = {}) {
    super();
    this.baseUrl = config.baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
    this.model = config.model || "qwen3.5:0.8b";
    this.think = config.think;
    this.name = `ollama/${this.model}`;
    // Explicit config wins, then the known-model table, then a generic default.
    this.capabilities = config.capabilities ?? OLLAMA_CAPABILITIES[this.model] ?? { contextLength: 4096, maxOutputTokens: 2048 };
  }
  /**
   * Stream a chat response as an async generator of normalized chunks
   * ("thinking", "text", "tool_call", "metadata", "done"). The body is
   * newline-delimited JSON; per-record translation is delegated to
   * ollamaStreamChunksFromChatData.
   * NOTE(review): "done" is only yielded when a record with `data.done` is
   * seen — a stream that ends without one produces no "done" chunk; confirm
   * consumers tolerate that.
   * @param {object} params - { messages, tools, temperature, signal,
   *   includeRawStreamEvents }.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/api/chat", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Ollama tool calls carry no ids; synthesize one per call.
    const nextToolCallId = () => `ollama_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
    try {
      while (true) {
        // Cooperative cancellation: stop reading once the caller aborts.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep any trailing partial line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;
          try {
            const data = JSON.parse(trimmed);
            // Optionally attach the raw provider record to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            const messageChunks = ollamaStreamChunksFromChatData(
              data,
              (args) => this.parseToolArguments(args),
              nextToolCallId
            );
            for (const chunk of messageChunks) {
              yield { ...chunk, ...raw };
            }
            // The final record carries done=true plus token counts.
            if (data.done) {
              if (data.prompt_eval_count || data.eval_count) {
                yield {
                  type: "metadata",
                  usagePhase: "output",
                  metadata: {
                    usage: {
                      promptTokens: data.prompt_eval_count || 0,
                      completionTokens: data.eval_count || 0,
                      totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
                    }
                  },
                  ...raw
                };
              }
              yield { type: "done", ...raw };
            }
          } catch {
          // Best-effort NDJSON parse: silently skip malformed/partial lines.
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Blocking (non-streaming) chat completion.
   * @param {object} params - same shape as stream() (signal unused here).
   * @returns {Promise<object>} { content, thinking?, toolCalls?, usage? }.
   * @throws {Error} on a non-2xx HTTP response.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/api/chat", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const result = {
      content: data.message?.content || ""
    };
    const thinking = data.message?.thinking;
    if (typeof thinking === "string" && thinking.length > 0) {
      result.thinking = thinking;
    }
    if (data.message?.tool_calls) {
      // One timestamp per batch; index keeps ids unique within it.
      const batchMs = Date.now();
      result.toolCalls = data.message.tool_calls.map((tc, index) => ({
        id: uniqueOllamaToolCallId(batchMs, index),
        name: tc.function?.name || "",
        arguments: this.parseToolArguments(tc.function?.arguments)
      }));
    }
    if (data.prompt_eval_count || data.eval_count) {
      result.usage = {
        promptTokens: data.prompt_eval_count || 0,
        completionTokens: data.eval_count || 0,
        totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
      };
    }
    return result;
  }
  // Normalize tool-call arguments to a plain object: objects pass through,
  // JSON strings are parsed (non-object results are wrapped as { value }),
  // and anything else collapses to {}.
  parseToolArguments(args) {
    if (args == null) return {};
    if (typeof args === "object" && !Array.isArray(args)) return args;
    if (typeof args === "string") {
      try {
        const parsed = JSON.parse(args);
        return typeof parsed === "object" && parsed !== null ? parsed : { value: parsed };
      } catch {
        return {};
      }
    }
    return {};
  }
  /**
   * Ollama expects tool_calls.function.arguments to be an object rather than
   * a JSON string. Tool-result messages use tool_name (see
   * https://docs.ollama.com/capabilities/tool-calling ), not OpenAI's
   * tool_call_id.
   */
  transformMessages(messages) {
    // First pass: map assistant tool-call ids to names, so tool-result
    // messages can be tagged with the tool's name.
    const toolCallIdToName = /* @__PURE__ */ new Map();
    for (const msg of messages) {
      if (msg.role === "assistant" && msg.toolCalls) {
        for (const tc of msg.toolCalls) {
          toolCallIdToName.set(tc.id, tc.name);
        }
      }
    }
    return messages.map((msg) => {
      if (msg.role === "tool" && msg.toolCallId) {
        const toolName = toolCallIdToName.get(msg.toolCallId) ?? msg.name;
        return {
          role: "tool",
          content: ollamaMessageContentToApiString(msg.content),
          ...toolName && { tool_name: toolName }
        };
      }
      return {
        role: msg.role,
        content: ollamaMessageContentToApiString(msg.content),
        ...msg.toolCalls && { tool_calls: msg.toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: this.parseToolArguments(tc.arguments)
          }
        })) }
      };
    });
  }
  // Map normalized params onto the Ollama /api/chat request schema.
  // Temperature rides inside `options`; `think` is only sent when configured.
  buildRequestBody(params, stream) {
    const body = {
      model: this.model,
      messages: this.transformMessages(params.messages),
      stream,
      ...params.temperature !== void 0 && { options: { temperature: params.temperature } }
    };
    if (this.think !== void 0) {
      body.think = this.think;
    }
    if (params.tools && params.tools.length > 0) {
      body.tools = toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  // POST JSON to `${baseUrl}${path}`; no auth headers are needed for Ollama.
  // Uses globalThis.fetch; `signal` enables caller-side cancellation.
  async fetch(path, body, signal) {
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body),
      signal
    });
  }
};
|
|
775
|
+
/**
 * Factory: build an OllamaAdapter from the given config.
 * @param {object} [config] - forwarded verbatim to the OllamaAdapter constructor.
 * @returns {OllamaAdapter}
 */
function createOllama(config) {
  const adapter = new OllamaAdapter(config);
  return adapter;
}
|
|
778
|
+
|
|
779
|
+
// src/models/index.ts
|
|
780
|
+
/**
 * Build a model adapter from a provider-tagged config.
 * @param {object} config - { provider: "openai"|"anthropic"|"ollama",
 *   apiKey?, baseUrl?, model?, think? }. Only the fields relevant to the
 *   chosen provider are forwarded.
 * @returns {OpenAIAdapter|AnthropicAdapter|OllamaAdapter}
 * @throws {Error} when `config.provider` is not a known provider.
 */
function createModel(config) {
  const { provider, apiKey, baseUrl, model, think } = config;
  if (provider === "openai") {
    return new OpenAIAdapter({ apiKey, baseUrl, model });
  }
  if (provider === "anthropic") {
    return new AnthropicAdapter({ apiKey, baseUrl, model });
  }
  if (provider === "ollama") {
    return new OllamaAdapter({ baseUrl, model, think });
  }
  throw new Error(`Unknown model provider: ${provider}`);
}
|
|
804
|
+
|
|
805
|
+
export { AnthropicAdapter, OllamaAdapter, OpenAIAdapter, createAnthropic, createModel, createOllama, createOpenAI, ollamaMessageContentToApiString, ollamaStreamChunksFromChatData };
|
|
806
|
+
//# sourceMappingURL=chunk-NDSL7NPN.js.map
|
|
807
|
+
//# sourceMappingURL=chunk-NDSL7NPN.js.map
|