@ddlqhd/agent-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +53 -0
  2. package/dist/chunk-5QMA2YBY.cjs +2880 -0
  3. package/dist/chunk-5QMA2YBY.cjs.map +1 -0
  4. package/dist/chunk-5Y56A64C.cjs +5 -0
  5. package/dist/chunk-5Y56A64C.cjs.map +1 -0
  6. package/dist/chunk-A3S3AGE3.js +3 -0
  7. package/dist/chunk-A3S3AGE3.js.map +1 -0
  8. package/dist/chunk-CNSGZVRN.cjs +152 -0
  9. package/dist/chunk-CNSGZVRN.cjs.map +1 -0
  10. package/dist/chunk-JF5AJQMU.cjs +2788 -0
  11. package/dist/chunk-JF5AJQMU.cjs.map +1 -0
  12. package/dist/chunk-NDSL7NPN.js +807 -0
  13. package/dist/chunk-NDSL7NPN.js.map +1 -0
  14. package/dist/chunk-OHXW2YM6.js +2708 -0
  15. package/dist/chunk-OHXW2YM6.js.map +1 -0
  16. package/dist/chunk-Q3SOMX26.js +2854 -0
  17. package/dist/chunk-Q3SOMX26.js.map +1 -0
  18. package/dist/chunk-WH3APNQ5.js +147 -0
  19. package/dist/chunk-WH3APNQ5.js.map +1 -0
  20. package/dist/chunk-X35MHWXE.cjs +817 -0
  21. package/dist/chunk-X35MHWXE.cjs.map +1 -0
  22. package/dist/cli/index.cjs +926 -0
  23. package/dist/cli/index.cjs.map +1 -0
  24. package/dist/cli/index.d.cts +24 -0
  25. package/dist/cli/index.d.ts +24 -0
  26. package/dist/cli/index.js +916 -0
  27. package/dist/cli/index.js.map +1 -0
  28. package/dist/index-DPsZ1zat.d.ts +447 -0
  29. package/dist/index-RTPmFjMp.d.cts +447 -0
  30. package/dist/index.cjs +508 -0
  31. package/dist/index.cjs.map +1 -0
  32. package/dist/index.d.cts +664 -0
  33. package/dist/index.d.ts +664 -0
  34. package/dist/index.js +204 -0
  35. package/dist/index.js.map +1 -0
  36. package/dist/models/index.cjs +62 -0
  37. package/dist/models/index.cjs.map +1 -0
  38. package/dist/models/index.d.cts +165 -0
  39. package/dist/models/index.d.ts +165 -0
  40. package/dist/models/index.js +5 -0
  41. package/dist/models/index.js.map +1 -0
  42. package/dist/tools/index.cjs +207 -0
  43. package/dist/tools/index.cjs.map +1 -0
  44. package/dist/tools/index.d.cts +108 -0
  45. package/dist/tools/index.d.ts +108 -0
  46. package/dist/tools/index.js +6 -0
  47. package/dist/tools/index.js.map +1 -0
  48. package/dist/types-C0aX_Qdp.d.cts +917 -0
  49. package/dist/types-C0aX_Qdp.d.ts +917 -0
  50. package/package.json +80 -0
@@ -0,0 +1,817 @@
1
+ #!/usr/bin/env node
2
+ 'use strict';
3
+
4
+ var chunkCNSGZVRN_cjs = require('./chunk-CNSGZVRN.cjs');
5
+
6
+ // src/models/openai.ts
7
// Per-model context window and output-token cap for known OpenAI models.
// Consulted by OpenAIAdapter's constructor when the caller does not pass
// explicit capabilities.
var OPENAI_CAPABILITIES = (() => {
  const caps = (contextLength, maxOutputTokens) => ({ contextLength, maxOutputTokens });
  return {
    "gpt-4o": caps(128000, 16384),
    "gpt-4o-mini": caps(128000, 16384),
    "gpt-4-turbo": caps(128000, 4096),
    "gpt-4": caps(8192, 4096),
    "gpt-3.5-turbo": caps(16385, 4096)
  };
})();
14
// Adapter for the OpenAI Chat Completions API. Works against any
// OpenAI-compatible endpoint via `baseUrl`. Extends BaseModelAdapter from
// the shared chunk; `transformMessages` is presumably inherited from the
// base class (it is not defined here) — TODO confirm.
var OpenAIAdapter = class extends chunkCNSGZVRN_cjs.BaseModelAdapter {
  // Adapter identity string, e.g. "openai/gpt-4o".
  name;
  // Bearer token; resolved from config or the OPENAI_API_KEY env var.
  apiKey;
  // API root; defaults to the public OpenAI v1 endpoint.
  baseUrl;
  // Model id sent in every request body.
  model;
  // Optional value for the OpenAI-Organization header.
  organization;
  /**
   * @param {object} [config] - { apiKey, baseUrl, model, organization, capabilities }.
   * @throws {Error} if no API key is available from config or the environment.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
    this.model = config.model || "gpt-4o";
    this.organization = config.organization || process.env.OPENAI_ORG_ID;
    if (!this.apiKey) {
      throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `openai/${this.model}`;
    // Explicit config wins, then the per-model table, then a generic fallback.
    this.capabilities = config.capabilities ?? OPENAI_CAPABILITIES[this.model] ?? { contextLength: 128e3, maxOutputTokens: 4096 };
  }
  /**
   * Stream a chat completion as an async generator of chunk objects:
   * "text", "tool_call_start", "tool_call_delta", "tool_call", "metadata",
   * and a final "done". Parses the SSE response line by line; tool-call
   * argument fragments are accumulated in `currentToolCall` and emitted as
   * one complete "tool_call" chunk when the call ends.
   * @param {object} params - messages, tools, temperature, maxTokens,
   *   stopSequences, signal, includeRawStreamEvents.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/chat/completions", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Tool call currently being accumulated across SSE deltas, or null.
    let currentToolCall = null;
    try {
      while (true) {
        // Cooperative cancellation: stop reading and exit the loop.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing (possibly partial) line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          // Skip blanks and the terminal sentinel; only "data: " lines matter.
          if (!trimmed || trimmed === "data: [DONE]") continue;
          if (!trimmed.startsWith("data: ")) continue;
          try {
            const data = JSON.parse(trimmed.slice(6));
            const choice = data.choices?.[0];
            if (!choice) continue;
            // Optionally attach the raw provider event to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            if (choice.delta?.content) {
              yield { type: "text", content: choice.delta.content, ...raw };
            }
            if (choice.delta?.tool_calls) {
              for (const toolCall of choice.delta.tool_calls) {
                if (toolCall.index !== void 0) {
                  if (toolCall.id && toolCall.function?.name) {
                    // A new tool call begins; flush any call still accumulating.
                    if (currentToolCall) {
                      yield {
                        type: "tool_call",
                        toolCall: {
                          id: currentToolCall.id,
                          name: currentToolCall.name,
                          arguments: this.safeParseJSON(currentToolCall.arguments)
                        },
                        ...raw
                      };
                    }
                    currentToolCall = {
                      id: toolCall.id,
                      name: toolCall.function.name,
                      arguments: toolCall.function.arguments || ""
                    };
                    // NOTE(review): this "tool_call_start" chunk uses
                    // {content, toolCallId}, while AnthropicAdapter emits
                    // {toolCall: {...}} — confirm consumers handle both shapes.
                    yield {
                      type: "tool_call_start",
                      content: toolCall.function.name,
                      toolCallId: toolCall.id,
                      ...raw
                    };
                  } else if (toolCall.function?.arguments && currentToolCall) {
                    // Continuation delta: append the raw argument fragment.
                    currentToolCall.arguments += toolCall.function.arguments;
                    yield {
                      type: "tool_call_delta",
                      content: toolCall.function.arguments,
                      toolCallId: currentToolCall.id,
                      ...raw
                    };
                  }
                }
              }
            }
            if (choice.finish_reason === "tool_calls" && currentToolCall) {
              // Model finished with a tool call: emit the completed call.
              yield {
                type: "tool_call",
                toolCall: {
                  id: currentToolCall.id,
                  name: currentToolCall.name,
                  arguments: this.safeParseJSON(currentToolCall.arguments)
                },
                ...raw
              };
              currentToolCall = null;
            }
            if (data.usage) {
              // Usage arrives once stream_options.include_usage is requested.
              yield {
                type: "metadata",
                usagePhase: "output",
                metadata: {
                  usage: {
                    promptTokens: data.usage.prompt_tokens,
                    completionTokens: data.usage.completion_tokens,
                    totalTokens: data.usage.total_tokens
                  }
                },
                ...raw
              };
            }
          } catch {
            // Malformed or partial SSE JSON is deliberately ignored
            // (best-effort parse); the next line may still be valid.
          }
        }
      }
      // Flush a tool call that never saw an explicit finish_reason.
      if (currentToolCall) {
        yield {
          type: "tool_call",
          toolCall: {
            id: currentToolCall.id,
            name: currentToolCall.name,
            arguments: this.safeParseJSON(currentToolCall.arguments)
          },
          ...params.includeRawStreamEvents ? { providerRaw: { trailing: true } } : {}
        };
      }
      yield { type: "done" };
    } finally {
      // Always release the stream lock, even on abort or error.
      reader.releaseLock();
    }
  }
  /**
   * Non-streaming chat completion.
   * @param {object} params - same shape as stream() minus streaming options.
   * @returns {Promise<object>} { content, toolCalls?, usage? }.
   * @throws {Error} on HTTP failure or when no choice is returned.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/chat/completions", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const choice = data.choices?.[0];
    if (!choice) {
      throw new Error("No completion choice returned");
    }
    const result = {
      content: choice.message?.content || ""
    };
    if (choice.message?.tool_calls) {
      result.toolCalls = choice.message.tool_calls.map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        arguments: this.safeParseJSON(tc.function.arguments)
      }));
    }
    if (data.usage) {
      result.usage = {
        promptTokens: data.usage.prompt_tokens,
        completionTokens: data.usage.completion_tokens,
        totalTokens: data.usage.total_tokens
      };
    }
    return result;
  }
  /**
   * Build the JSON request body for /chat/completions.
   * Optional fields are spread in only when set (spreading a falsy value
   * is a no-op). Usage reporting is requested only in streaming mode.
   */
  buildRequestBody(params, stream) {
    const messages = this.transformMessages(params.messages);
    const body = {
      model: this.model,
      messages,
      stream,
      ...stream && { stream_options: { include_usage: true } },
      ...params.temperature !== void 0 && { temperature: params.temperature },
      ...params.maxTokens !== void 0 && { max_tokens: params.maxTokens },
      ...params.stopSequences && { stop: params.stopSequences }
    };
    if (params.tools && params.tools.length > 0) {
      // Wrap each shared-schema tool in OpenAI's {type:"function"} envelope.
      body.tools = chunkCNSGZVRN_cjs.toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  /**
   * POST JSON to `${baseUrl}${path}` with auth headers.
   * Returns the raw Response; callers check `ok` themselves.
   */
  async fetch(path, body, signal) {
    const headers = {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.apiKey}`
    };
    if (this.organization) {
      headers["OpenAI-Organization"] = this.organization;
    }
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers,
      body: JSON.stringify(body),
      signal
    });
  }
  // Parse a JSON string; on failure return the original string unchanged,
  // so callers may receive either an object or the raw text.
  safeParseJSON(str) {
    try {
      return JSON.parse(str);
    } catch {
      return str;
    }
  }
};
226
/**
 * Convenience factory for OpenAIAdapter; equivalent to `new OpenAIAdapter(config)`.
 * @param {object} [config] - adapter configuration (apiKey, baseUrl, model, organization, capabilities).
 * @returns {OpenAIAdapter}
 */
function createOpenAI(config) {
  const adapter = new OpenAIAdapter(config);
  return adapter;
}
229
+
230
+ // src/models/anthropic.ts
231
// Per-model context window and output-token cap for known Anthropic models.
// Consulted by AnthropicAdapter's constructor when the caller does not pass
// explicit capabilities.
var ANTHROPIC_CAPABILITIES = (() => {
  const caps = (contextLength, maxOutputTokens) => ({ contextLength, maxOutputTokens });
  return {
    "claude-sonnet-4-20250514": caps(200000, 16384),
    "claude-haiku": caps(200000, 8192),
    "claude-3-5-sonnet-20241022": caps(200000, 8192),
    "claude-3-haiku-20240307": caps(200000, 4096)
  };
})();
237
// Adapter for the Anthropic Messages API. Extends BaseModelAdapter from the
// shared chunk. Unlike the OpenAI adapter, system messages are hoisted into
// the top-level `system` field and tool results are sent as user-role
// `tool_result` content blocks.
var AnthropicAdapter = class extends chunkCNSGZVRN_cjs.BaseModelAdapter {
  // Adapter identity string, e.g. "anthropic/claude-sonnet-4-20250514".
  name;
  // API key; resolved from config or the ANTHROPIC_API_KEY env var.
  apiKey;
  // API root; defaults to the public Anthropic endpoint.
  baseUrl;
  // Model id sent in every request body.
  model;
  // Value for the required anthropic-version header.
  version;
  /**
   * @param {object} [config] - { apiKey, baseUrl, model, version, capabilities }.
   * @throws {Error} if no API key is available from config or the environment.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com";
    this.model = config.model || "claude-sonnet-4-20250514";
    this.version = config.version || "2023-06-01";
    if (!this.apiKey) {
      throw new Error("Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `anthropic/${this.model}`;
    // Explicit config wins, then the per-model table, then a generic fallback.
    this.capabilities = config.capabilities ?? ANTHROPIC_CAPABILITIES[this.model] ?? { contextLength: 2e5, maxOutputTokens: 4096 };
  }
  /**
   * Stream a message as an async generator of chunk objects. Maps Anthropic
   * SSE events to chunks: content_block_start -> "tool_call_start"/"thinking",
   * content_block_delta -> "text"/"thinking"/"tool_call_delta",
   * content_block_stop -> completed "tool_call", message_start -> input-usage
   * "metadata", message_delta -> output-usage "metadata"; ends with "done".
   * @param {object} params - messages, tools, temperature, maxTokens, signal,
   *   includeRawStreamEvents.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/v1/messages", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Tool call being accumulated from input_json_delta fragments, or null.
    let currentToolCall = null;
    // Tracks the open thinking block so deltas can carry its signature.
    let currentThinkingBlock = null;
    try {
      while (true) {
        // Cooperative cancellation: stop reading and exit the loop.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing (possibly partial) line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed || !trimmed.startsWith("data:")) continue;
          // Accept both "data:{...}" and "data: {...}" framing.
          let jsonStart = 5;
          if (trimmed.length > 5 && trimmed[5] === " ") {
            jsonStart = 6;
          }
          const jsonStr = trimmed.slice(jsonStart);
          try {
            const data = JSON.parse(jsonStr);
            // Optionally attach the raw provider event to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            switch (data.type) {
              case "content_block_start":
                if (data.content_block?.type === "tool_use") {
                  currentToolCall = {
                    id: data.content_block.id,
                    name: data.content_block.name,
                    input: ""
                  };
                  yield {
                    type: "tool_call_start",
                    toolCall: {
                      id: data.content_block.id,
                      name: data.content_block.name,
                      arguments: {}
                    },
                    ...raw
                  };
                } else if (data.content_block?.type === "thinking") {
                  // NOTE(review): signature may be undefined at block start —
                  // confirm whether it only arrives later in the stream.
                  currentThinkingBlock = {
                    signature: data.content_block.signature
                  };
                  yield {
                    type: "thinking",
                    content: data.content_block.thinking,
                    signature: currentThinkingBlock.signature,
                    ...raw
                  };
                }
                break;
              case "content_block_delta":
                if (data.delta?.type === "text_delta") {
                  yield { type: "text", content: data.delta.text, ...raw };
                } else if (data.delta?.type === "thinking_delta") {
                  yield {
                    type: "thinking",
                    content: data.delta.thinking,
                    signature: currentThinkingBlock?.signature,
                    ...raw
                  };
                } else if (data.delta?.type === "input_json_delta" && currentToolCall) {
                  // Append the raw JSON fragment; parsed once the block stops.
                  currentToolCall.input += data.delta.partial_json;
                  yield {
                    type: "tool_call_delta",
                    content: data.delta.partial_json,
                    toolCallId: currentToolCall.id,
                    ...raw
                  };
                }
                break;
              case "content_block_stop":
                if (currentToolCall) {
                  // Block complete: emit the assembled tool call.
                  yield {
                    type: "tool_call",
                    toolCall: {
                      id: currentToolCall.id,
                      name: currentToolCall.name,
                      arguments: this.safeParseJSON(currentToolCall.input)
                    },
                    ...raw
                  };
                  currentToolCall = null;
                }
                if (currentThinkingBlock) {
                  currentThinkingBlock = null;
                }
                break;
              case "message_start":
                if (data.message?.usage) {
                  const usage = data.message.usage;
                  // Cache reads are billed/reported separately; fold them into
                  // the effective prompt size.
                  const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
                  yield {
                    type: "metadata",
                    usagePhase: "input",
                    metadata: {
                      usage: {
                        promptTokens: actualInputTokens,
                        completionTokens: 0,
                        totalTokens: actualInputTokens,
                        // pass cache accounting through to the consumer
                        cacheReadTokens: usage.cache_read_input_tokens || 0,
                        cacheWriteTokens: usage.cache_creation_input_tokens || 0
                      }
                    },
                    ...raw
                  };
                }
                break;
              case "message_delta":
                if (data.usage) {
                  yield {
                    type: "metadata",
                    usagePhase: "output",
                    metadata: {
                      usage: {
                        promptTokens: 0,
                        completionTokens: data.usage.output_tokens,
                        totalTokens: data.usage.output_tokens
                      }
                    },
                    ...raw
                  };
                }
                break;
            }
          } catch {
            // Malformed or partial SSE JSON is deliberately ignored
            // (best-effort parse); the next line may still be valid.
          }
        }
      }
      yield { type: "done" };
    } finally {
      // Always release the stream lock, even on abort or error.
      reader.releaseLock();
    }
  }
  /**
   * Non-streaming message request. Concatenates text blocks into `content`
   * and collects tool_use blocks into `toolCalls`.
   * @returns {Promise<object>} { content, toolCalls?, usage? }.
   * @throws {Error} on HTTP failure.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/v1/messages", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const result = {
      content: ""
    };
    const toolCalls = [];
    for (const block of data.content || []) {
      if (block.type === "text") {
        result.content += block.text;
      } else if (block.type === "tool_use") {
        toolCalls.push({
          id: block.id,
          name: block.name,
          arguments: block.input
        });
      }
    }
    if (toolCalls.length > 0) {
      result.toolCalls = toolCalls;
    }
    if (data.usage) {
      const usage = data.usage;
      // Fold cache reads into the effective prompt size (see stream()).
      const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
      result.usage = {
        promptTokens: actualInputTokens,
        completionTokens: usage.output_tokens,
        totalTokens: actualInputTokens + usage.output_tokens
      };
    }
    return result;
  }
  /**
   * Build the JSON request body for /v1/messages. max_tokens is always sent
   * (falls back to 4096); system prompts are hoisted out of the message list.
   */
  buildRequestBody(params, stream) {
    const { system, messages } = this.extractSystemMessage(params.messages);
    const transformedMessages = this.transformAnthropicMessages(messages);
    const body = {
      model: this.model,
      max_tokens: params.maxTokens || 4096,
      messages: transformedMessages,
      stream,
      ...system && { system },
      ...params.temperature !== void 0 && { temperature: params.temperature }
    };
    if (params.tools && params.tools.length > 0) {
      // Anthropic's tool shape is flat: name/description/input_schema.
      body.tools = chunkCNSGZVRN_cjs.toolsToModelSchema(params.tools).map((tool) => ({
        name: tool.name,
        description: tool.description,
        input_schema: tool.parameters
      }));
    }
    return body;
  }
  /**
   * Split out system-role messages and join their contents with blank lines.
   * NOTE(review): assumes system message content is a string — an array
   * content would stringify oddly via join(); confirm upstream guarantees.
   * @returns {{system: (string|undefined), messages: Array}}
   */
  extractSystemMessage(messages) {
    const systemMessages = messages.filter((m) => m.role === "system");
    const otherMessages = messages.filter((m) => m.role !== "system");
    const combinedSystem = systemMessages.length > 0 ? systemMessages.map((m) => m.content).join("\n\n") : void 0;
    return {
      system: combinedSystem,
      messages: otherMessages
    };
  }
  /**
   * Map internal messages to Anthropic's block-based message format:
   * - string content -> a single text block
   * - assistant toolCalls -> appended tool_use blocks
   * - tool-role messages -> user-role tool_result blocks
   * NOTE(review): when an array content is empty, `content` is set to the
   * string "" — if that message also carries toolCalls, the later
   * `content.push(...)` would throw (push on a string). Confirm this
   * combination cannot occur upstream.
   */
  transformAnthropicMessages(messages) {
    return messages.map((msg) => {
      const transformed = {
        role: msg.role === "assistant" ? "assistant" : "user",
        content: []
      };
      if (typeof msg.content === "string") {
        transformed.content = [{ type: "text", text: msg.content }];
      } else if (Array.isArray(msg.content)) {
        const contentParts = [];
        for (const part of msg.content) {
          if (part.type === "thinking") {
            // Thinking blocks are passed through unchanged (signature intact).
            contentParts.push(part);
          } else if (part.type === "text") {
            contentParts.push({ type: "text", text: part.text });
          } else {
            contentParts.push(part);
          }
        }
        transformed.content = contentParts;
        if (contentParts.length === 0) {
          transformed.content = "";
        }
      }
      if (msg.toolCalls && msg.role === "assistant") {
        for (const tc of msg.toolCalls) {
          transformed.content.push({
            type: "tool_use",
            id: tc.id,
            name: tc.name,
            input: tc.arguments
          });
        }
      }
      if (msg.role === "tool" && msg.toolCallId) {
        // Tool results are delivered as user-role tool_result blocks.
        transformed.role = "user";
        transformed.content = [{
          type: "tool_result",
          tool_use_id: msg.toolCallId,
          content: msg.content
        }];
      }
      return transformed;
    });
  }
  /**
   * POST JSON to `${baseUrl}${path}` with Anthropic auth/version headers.
   * Returns the raw Response; callers check `ok` themselves.
   */
  async fetch(path, body, signal) {
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": this.apiKey,
        "anthropic-version": this.version
      },
      body: JSON.stringify(body),
      signal
    });
  }
  // Parse a JSON string; on failure return the original string unchanged,
  // so callers may receive either an object or the raw text.
  safeParseJSON(str) {
    try {
      return JSON.parse(str);
    } catch {
      return str;
    }
  }
};
536
/**
 * Convenience factory for AnthropicAdapter; equivalent to `new AnthropicAdapter(config)`.
 * @param {object} [config] - adapter configuration (apiKey, baseUrl, model, version, capabilities).
 * @returns {AnthropicAdapter}
 */
function createAnthropic(config) {
  const adapter = new AnthropicAdapter(config);
  return adapter;
}
539
+
540
+ // src/models/ollama.ts
541
// Per-model context window and output-token cap for known Ollama models.
// Consulted by OllamaAdapter's constructor when the caller does not pass
// explicit capabilities.
var OLLAMA_CAPABILITIES = (() => {
  const caps = (contextLength, maxOutputTokens) => ({ contextLength, maxOutputTokens });
  return {
    "qwen3.5:0.8b": caps(32768, 4096),
    "minimax-m2.7:cloud": caps(128000, 16384),
    "nemotron-3-super:cloud": caps(128000, 16384),
    "glm-5:cloud": caps(128000, 16384)
  };
})();
547
/**
 * Convert one parsed record of an Ollama /api/chat streaming response into
 * zero or more stream chunks, emitted in a fixed order: "thinking", then
 * "text", then one "tool_call" per entry in message.tool_calls.
 * @param {object} data - one parsed NDJSON record from the chat endpoint.
 * @param {(args: unknown) => object} parseToolArguments - normalizes raw tool
 *   arguments into a plain object.
 * @param {() => string} nextToolCallId - supplies a fresh id per tool call
 *   (Ollama does not provide ids itself).
 * @returns {Array<object>} chunk objects; empty when data.message is absent.
 */
function ollamaStreamChunksFromChatData(data, parseToolArguments, nextToolCallId) {
  const message = data.message;
  if (!message) return [];
  const out = [];
  if (typeof message.thinking === "string" && message.thinking.length > 0) {
    out.push({ type: "thinking", content: message.thinking });
  }
  if (typeof message.content === "string" && message.content.length > 0) {
    out.push({ type: "text", content: message.content });
  }
  if (Array.isArray(message.tool_calls)) {
    for (const call of message.tool_calls) {
      const fn = call.function;
      const name = typeof fn?.name === "string" && fn.name ? fn.name : "";
      out.push({
        type: "tool_call",
        toolCall: {
          id: nextToolCallId(),
          name,
          arguments: parseToolArguments(fn?.arguments)
        }
      });
    }
  }
  return out;
}
576
/**
 * Flatten internal message content into the plain string Ollama's API
 * expects: strings pass through unchanged; arrays keep only their "text"
 * parts, joined with blank lines; anything else yields "".
 * @param {string|Array<object>|unknown} content
 * @returns {string}
 */
function ollamaMessageContentToApiString(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
  return content
    .filter((part) => part.type === "text")
    .map((part) => part.text)
    .join("\n\n");
}
587
/**
 * Build a unique id for an Ollama tool call (Ollama assigns none itself):
 * "ollama_<batchMs>_<index>_<random base36 suffix>". batchMs plus index keep
 * ids from one response batch distinct; the random tail guards across batches.
 * @param {number} batchMs - timestamp shared by one batch of tool calls.
 * @param {number} index - position of the call within the batch.
 * @returns {string}
 */
function uniqueOllamaToolCallId(batchMs, index) {
  const suffix = Math.random().toString(36).slice(2, 11);
  return ["ollama", batchMs, index, suffix].join("_");
}
590
// Adapter for a local or remote Ollama server (/api/chat). No API key is
// needed. Unlike the other adapters, responses are newline-delimited JSON
// (NDJSON), not SSE. Extends BaseModelAdapter from the shared chunk.
var OllamaAdapter = class extends chunkCNSGZVRN_cjs.BaseModelAdapter {
  // Adapter identity string, e.g. "ollama/qwen3.5:0.8b".
  name;
  // Server root; defaults to the local Ollama daemon.
  baseUrl;
  // Model id sent in every request body.
  model;
  // Optional "think" flag forwarded verbatim to the API when set.
  think;
  /**
   * @param {object} [config] - { baseUrl, model, think, capabilities }.
   */
  constructor(config = {}) {
    super();
    this.baseUrl = config.baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
    this.model = config.model || "qwen3.5:0.8b";
    this.think = config.think;
    this.name = `ollama/${this.model}`;
    // Explicit config wins, then the per-model table, then a small fallback.
    this.capabilities = config.capabilities ?? OLLAMA_CAPABILITIES[this.model] ?? { contextLength: 4096, maxOutputTokens: 2048 };
  }
  /**
   * Stream a chat as an async generator of chunk objects. Each NDJSON line
   * is converted via ollamaStreamChunksFromChatData; the record with
   * `done: true` additionally emits usage "metadata" and a "done" chunk.
   * NOTE(review): if the stream ends without a `done: true` record, no
   * "done" chunk is emitted (the other adapters always yield one) — confirm
   * consumers tolerate this.
   * @throws {Error} on a non-2xx HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/api/chat", body, params.signal);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Ollama does not assign tool-call ids; synthesize unique ones.
    const nextToolCallId = () => `ollama_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
    try {
      while (true) {
        // Cooperative cancellation: stop reading and exit the loop.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // Keep the trailing (possibly partial) line for the next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;
          try {
            const data = JSON.parse(trimmed);
            // Optionally attach the raw provider record to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            const messageChunks = ollamaStreamChunksFromChatData(
              data,
              (args) => this.parseToolArguments(args),
              nextToolCallId
            );
            for (const chunk of messageChunks) {
              yield { ...chunk, ...raw };
            }
            if (data.done) {
              // Final record: token counts live on the top-level record.
              if (data.prompt_eval_count || data.eval_count) {
                yield {
                  type: "metadata",
                  usagePhase: "output",
                  metadata: {
                    usage: {
                      promptTokens: data.prompt_eval_count || 0,
                      completionTokens: data.eval_count || 0,
                      totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
                    }
                  },
                  ...raw
                };
              }
              yield { type: "done", ...raw };
            }
          } catch {
            // Malformed or partial NDJSON is deliberately ignored
            // (best-effort parse); the next line may still be valid.
          }
        }
      }
    } finally {
      // Always release the stream lock, even on abort or error.
      reader.releaseLock();
    }
  }
  /**
   * Non-streaming chat request.
   * @returns {Promise<object>} { content, thinking?, toolCalls?, usage? }.
   * @throws {Error} on HTTP failure.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/api/chat", body);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const result = {
      content: data.message?.content || ""
    };
    const thinking = data.message?.thinking;
    if (typeof thinking === "string" && thinking.length > 0) {
      result.thinking = thinking;
    }
    if (data.message?.tool_calls) {
      // One shared timestamp per batch; index + random tail keep ids unique.
      const batchMs = Date.now();
      result.toolCalls = data.message.tool_calls.map((tc, index) => ({
        id: uniqueOllamaToolCallId(batchMs, index),
        name: tc.function?.name || "",
        arguments: this.parseToolArguments(tc.function?.arguments)
      }));
    }
    if (data.prompt_eval_count || data.eval_count) {
      result.usage = {
        promptTokens: data.prompt_eval_count || 0,
        completionTokens: data.eval_count || 0,
        totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
      };
    }
    return result;
  }
  /**
   * Normalize tool arguments to a plain object: objects pass through,
   * JSON strings are parsed (scalars wrapped as { value }), and anything
   * else — including unparseable strings and arrays — collapses to {}.
   */
  parseToolArguments(args) {
    if (args == null) return {};
    if (typeof args === "object" && !Array.isArray(args)) return args;
    if (typeof args === "string") {
      try {
        const parsed = JSON.parse(args);
        return typeof parsed === "object" && parsed !== null ? parsed : { value: parsed };
      } catch {
        return {};
      }
    }
    return {};
  }
  /**
   * Ollama requires tool_calls.function.arguments to be an object, not a
   * JSON string. Tool-result messages use tool_name (see
   * https://docs.ollama.com/capabilities/tool-calling ), not OpenAI's
   * tool_call_id, so the assistant's earlier toolCalls are indexed by id to
   * recover the tool's name for each tool-role message.
   */
  transformMessages(messages) {
    const toolCallIdToName = /* @__PURE__ */ new Map();
    for (const msg of messages) {
      if (msg.role === "assistant" && msg.toolCalls) {
        for (const tc of msg.toolCalls) {
          toolCallIdToName.set(tc.id, tc.name);
        }
      }
    }
    return messages.map((msg) => {
      if (msg.role === "tool" && msg.toolCallId) {
        // Fall back to msg.name when the id was never announced —
        // presumably set by the caller; verify against upstream producers.
        const toolName = toolCallIdToName.get(msg.toolCallId) ?? msg.name;
        return {
          role: "tool",
          content: ollamaMessageContentToApiString(msg.content),
          ...toolName && { tool_name: toolName }
        };
      }
      return {
        role: msg.role,
        content: ollamaMessageContentToApiString(msg.content),
        ...msg.toolCalls && { tool_calls: msg.toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: this.parseToolArguments(tc.arguments)
          }
        })) }
      };
    });
  }
  /**
   * Build the JSON request body for /api/chat. Temperature travels inside
   * `options`; the `think` flag is included only when configured.
   */
  buildRequestBody(params, stream) {
    const body = {
      model: this.model,
      messages: this.transformMessages(params.messages),
      stream,
      ...params.temperature !== void 0 && { options: { temperature: params.temperature } }
    };
    if (this.think !== void 0) {
      body.think = this.think;
    }
    if (params.tools && params.tools.length > 0) {
      // Wrap each shared-schema tool in the {type:"function"} envelope.
      body.tools = chunkCNSGZVRN_cjs.toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  /**
   * POST JSON to `${baseUrl}${path}` (no auth headers — Ollama is keyless).
   * Returns the raw Response; callers check `ok` themselves.
   */
  async fetch(path, body, signal) {
    return globalThis.fetch(`${this.baseUrl}${path}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify(body),
      signal
    });
  }
};
777
/**
 * Convenience factory for OllamaAdapter; equivalent to `new OllamaAdapter(config)`.
 * @param {object} [config] - adapter configuration (baseUrl, model, think, capabilities).
 * @returns {OllamaAdapter}
 */
function createOllama(config) {
  const adapter = new OllamaAdapter(config);
  return adapter;
}
780
+
781
+ // src/models/index.ts
782
/**
 * Instantiate a model adapter from a provider-discriminated config.
 * Only the fields relevant to each provider are forwarded (Ollama takes no
 * apiKey; `think` is Ollama-only).
 * @param {object} config - { provider, apiKey?, baseUrl?, model?, think? }.
 * @returns {OpenAIAdapter|AnthropicAdapter|OllamaAdapter}
 * @throws {Error} when config.provider is not "openai", "anthropic", or "ollama".
 */
function createModel(config) {
  const { apiKey, baseUrl, model, think } = config;
  if (config.provider === "openai") {
    return new OpenAIAdapter({ apiKey, baseUrl, model });
  }
  if (config.provider === "anthropic") {
    return new AnthropicAdapter({ apiKey, baseUrl, model });
  }
  if (config.provider === "ollama") {
    return new OllamaAdapter({ baseUrl, model, think });
  }
  throw new Error(`Unknown model provider: ${config.provider}`);
}
806
+
807
// Public surface of this chunk: the three provider adapters, their
// factories, the generic createModel dispatcher, and the two pure Ollama
// helpers (exported for reuse/testing).
exports.AnthropicAdapter = AnthropicAdapter;
exports.OllamaAdapter = OllamaAdapter;
exports.OpenAIAdapter = OpenAIAdapter;
exports.createAnthropic = createAnthropic;
exports.createModel = createModel;
exports.createOllama = createOllama;
exports.createOpenAI = createOpenAI;
exports.ollamaMessageContentToApiString = ollamaMessageContentToApiString;
exports.ollamaStreamChunksFromChatData = ollamaStreamChunksFromChatData;
// Deduplicated: the sourceMappingURL directive appeared twice; tooling only
// honors one, and the duplicate was generation noise.
//# sourceMappingURL=chunk-X35MHWXE.cjs.map