@ddlqhd/agent-sdk 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. package/README.md +4 -2
  2. package/dist/{chunk-5QMA2YBY.cjs → chunk-6X7EYQLS.cjs} +782 -114
  3. package/dist/chunk-6X7EYQLS.cjs.map +1 -0
  4. package/dist/{chunk-NDSL7NPN.js → chunk-D3UZNLZO.js} +769 -71
  5. package/dist/chunk-D3UZNLZO.js.map +1 -0
  6. package/dist/{chunk-Q3SOMX26.js → chunk-EQ5CXH44.js} +772 -111
  7. package/dist/chunk-EQ5CXH44.js.map +1 -0
  8. package/dist/chunk-LOYIGOBZ.js +54 -0
  9. package/dist/chunk-LOYIGOBZ.js.map +1 -0
  10. package/dist/{chunk-OHXW2YM6.js → chunk-MEJHTQJM.js} +289 -166
  11. package/dist/chunk-MEJHTQJM.js.map +1 -0
  12. package/dist/chunk-NYZD3THB.cjs +1521 -0
  13. package/dist/chunk-NYZD3THB.cjs.map +1 -0
  14. package/dist/chunk-OZO7D77N.cjs +59 -0
  15. package/dist/chunk-OZO7D77N.cjs.map +1 -0
  16. package/dist/{chunk-JF5AJQMU.cjs → chunk-Z45DHTDX.cjs} +291 -170
  17. package/dist/chunk-Z45DHTDX.cjs.map +1 -0
  18. package/dist/cli/index.cjs +47 -39
  19. package/dist/cli/index.cjs.map +1 -1
  20. package/dist/cli/index.js +22 -14
  21. package/dist/cli/index.js.map +1 -1
  22. package/dist/{index-DPsZ1zat.d.ts → index-Cw3SfEAB.d.ts} +20 -34
  23. package/dist/{index-RTPmFjMp.d.cts → index-D2Qntkn_.d.cts} +20 -34
  24. package/dist/index.cjs +125 -89
  25. package/dist/index.d.cts +62 -22
  26. package/dist/index.d.ts +62 -22
  27. package/dist/index.js +4 -4
  28. package/dist/models/index.cjs +19 -15
  29. package/dist/models/index.d.cts +55 -6
  30. package/dist/models/index.d.ts +55 -6
  31. package/dist/models/index.js +2 -2
  32. package/dist/tools/index.cjs +53 -61
  33. package/dist/tools/index.d.cts +3 -3
  34. package/dist/tools/index.d.ts +3 -3
  35. package/dist/tools/index.js +2 -2
  36. package/dist/{types-C0aX_Qdp.d.cts → types-CWPAYWzr.d.cts} +307 -61
  37. package/dist/{types-C0aX_Qdp.d.ts → types-CWPAYWzr.d.ts} +307 -61
  38. package/package.json +25 -14
  39. package/dist/chunk-5QMA2YBY.cjs.map +0 -1
  40. package/dist/chunk-CNSGZVRN.cjs +0 -152
  41. package/dist/chunk-CNSGZVRN.cjs.map +0 -1
  42. package/dist/chunk-JF5AJQMU.cjs.map +0 -1
  43. package/dist/chunk-NDSL7NPN.js.map +0 -1
  44. package/dist/chunk-OHXW2YM6.js.map +0 -1
  45. package/dist/chunk-Q3SOMX26.js.map +0 -1
  46. package/dist/chunk-WH3APNQ5.js +0 -147
  47. package/dist/chunk-WH3APNQ5.js.map +0 -1
  48. package/dist/chunk-X35MHWXE.cjs +0 -817
  49. package/dist/chunk-X35MHWXE.cjs.map +0 -1
@@ -0,0 +1,1521 @@
1
+ #!/usr/bin/env node
2
+ 'use strict';
3
+
4
+ var chunkOZO7D77N_cjs = require('./chunk-OZO7D77N.cjs');
5
+ var crypto = require('crypto');
6
+
7
// src/models/default-capabilities.ts
// Fallback capability limits used when an adapter config does not supply
// explicit `capabilities`.
var DEFAULT_ADAPTER_CAPABILITIES = {
  contextLength: 2e5,    // 200k-token context window
  maxOutputTokens: 32e3  // 32k-token output cap; used as the default max_tokens
};
12
+
13
// src/core/logger.ts
// Values accepted as "true" when parsing boolean environment variables.
var TRUTHY = /^(1|true|yes)$/i;
// Default cap on logged string length before truncation.
var DEFAULT_MAX_BODY_CHARS = 4e3;
// Key names whose values are always replaced with "[REDACTED]" in logs
// (matched case-insensitively; callers may extend via config.redactKeys).
var DEFAULT_REDACT_KEYS = [
  "authorization",
  "proxy-authorization",
  "x-api-key",
  "api-key",
  "apikey",
  "api_key",
  "cookie",
  "set-cookie",
  "token",
  "access_token",
  "refresh_token",
  "password",
  "secret"
];
// Numeric ordering of log levels; a higher number is more severe.
// "silent" (99) suppresses every event.
var LEVEL_PRIORITY = {
  debug: 10,
  info: 20,
  warn: 30,
  error: 40,
  silent: 99
};
38
/**
 * Parse a log level from a raw environment-variable value.
 *
 * @param {string | null | undefined} raw - Raw environment value.
 * @returns {string | undefined} One of "debug" | "info" | "warn" |
 *   "error" | "silent" (trimmed, lower-cased), or `undefined` when the
 *   value is missing or not a recognized level.
 */
function parseEnvLogLevel(raw) {
  // Normalize once instead of recomputing the same expression in the
  // matched case arm (the original evaluated it twice).
  const normalized = (raw ?? "").trim().toLowerCase();
  switch (normalized) {
    case "debug":
    case "info":
    case "warn":
    case "error":
    case "silent":
      return normalized;
    default:
      return void 0;
  }
}
50
/**
 * Interpret an environment variable as a tri-state boolean.
 * Truthy values ("1"/"true"/"yes", any case) yield true, falsy values
 * ("0"/"false"/"no") yield false; unset, empty, or unrecognized values
 * yield undefined so callers can fall through to their own default.
 */
function parseBooleanEnv(name) {
  const raw = process.env[name];
  if (raw == null || raw === "") {
    return void 0;
  }
  const trimmed = raw.trim();
  if (TRUTHY.test(trimmed)) {
    return true;
  }
  return /^(0|false|no)$/i.test(trimmed) ? false : void 0;
}
63
/**
 * Read an environment variable as a finite number.
 * Returns undefined when the variable is unset, empty, or does not
 * parse to a finite numeric value.
 */
function parseNumericEnv(name) {
  const raw = process.env[name];
  if (raw == null || raw === "") {
    return void 0;
  }
  const parsed = Number(raw);
  if (!Number.isFinite(parsed)) {
    return void 0;
  }
  return parsed;
}
71
/**
 * Determine the effective SDK log level.
 * Priority: explicit `level` argument > AGENT_SDK_LOG_LEVEL environment
 * variable > "info" when a custom logger was supplied > "silent".
 */
function resolveSDKLogLevel(level, hasLogger = false) {
  if (level != null) {
    return level;
  }
  const envLevel = parseEnvLogLevel(process.env.AGENT_SDK_LOG_LEVEL);
  if (envLevel != null) {
    return envLevel;
  }
  return hasLogger ? "info" : "silent";
}
84
/**
 * Build the effective redaction settings. Precedence for each field:
 * explicit config > environment override > built-in default. Custom
 * redact keys extend (never replace) the default list.
 */
function resolveLogRedaction(config) {
  const envBodies = parseBooleanEnv("AGENT_SDK_LOG_BODIES");
  const envToolArgs = parseBooleanEnv("AGENT_SDK_LOG_INCLUDE_TOOL_ARGS");
  const envMaxChars = parseNumericEnv("AGENT_SDK_LOG_MAX_BODY_CHARS");
  // Clamp to a non-negative integer character budget.
  const maxBodyChars = Math.max(
    0,
    Math.floor(config?.maxBodyChars ?? envMaxChars ?? DEFAULT_MAX_BODY_CHARS)
  );
  return {
    includeBodies: config?.includeBodies ?? envBodies ?? false,
    includeToolArguments: config?.includeToolArguments ?? envToolArgs ?? false,
    maxBodyChars,
    redactKeys: [...DEFAULT_REDACT_KEYS, ...config?.redactKeys ?? []]
  };
}
101
/**
 * Decide whether an event at `eventLevel` passes the configured
 * threshold (after env/default resolution of the configured level).
 */
function shouldEmitLog(configuredLevel, hasLogger, eventLevel) {
  const threshold = LEVEL_PRIORITY[resolveSDKLogLevel(configuredLevel, hasLogger)];
  return LEVEL_PRIORITY[eventLevel] >= threshold;
}
105
/**
 * Truncate `value` to `maxChars` characters, appending a marker with the
 * number of characters removed. A non-positive `maxChars` disables
 * truncation entirely.
 */
function truncateString(value, maxChars) {
  const overflow = value.length - maxChars;
  if (maxChars <= 0 || overflow <= 0) {
    return value;
  }
  return `${value.slice(0, maxChars)}... [truncated ${overflow} chars]`;
}
111
/**
 * True when `key` case-insensitively matches one of the configured
 * redaction keys; false for missing key or missing redaction config.
 */
function isSensitiveKey(key, redaction) {
  if (key == null || redaction == null) {
    return false;
  }
  const lowered = key.toLowerCase();
  for (const candidate of redaction.redactKeys) {
    if (candidate.toLowerCase() === lowered) {
      return true;
    }
  }
  return false;
}
118
/**
 * Sanitize an object's [key, value] entries for logging. Sensitive keys,
 * message arrays (when bodies are hidden), and tool arguments (when tool
 * args are hidden) become redaction markers; all other values are
 * sanitized recursively.
 */
function sanitizeObjectEntries(entries, redaction) {
  const sanitized = {};
  for (const [key, value] of entries) {
    if (isSensitiveKey(key, redaction)) {
      sanitized[key] = "[REDACTED]";
    } else if (key === "messages" && !redaction.includeBodies && Array.isArray(value)) {
      // Hide conversation content but preserve the message count.
      sanitized[key] = `[REDACTED_MESSAGES:${value.length}]`;
    } else if ((key === "arguments" || key === "input") && !redaction.includeToolArguments) {
      sanitized[key] = "[REDACTED_TOOL_ARGUMENTS]";
    } else {
      sanitized[key] = sanitizeForLogging(value, redaction, key);
    }
  }
  return sanitized;
}
137
/**
 * Recursively sanitize an arbitrary value for logging: key-based
 * redaction, body redaction for content-like string keys, message-array
 * redaction, and string truncation. Primitives pass through unchanged.
 */
function sanitizeForLogging(value, redaction, key) {
  if (isSensitiveKey(key, redaction)) {
    return "[REDACTED]";
  }
  if (typeof value === "string") {
    const isBodyKey = key === "content" || key === "text" || key === "thinking";
    if (!redaction.includeBodies && isBodyKey) {
      return "[REDACTED_BODY]";
    }
    return truncateString(value, redaction.maxBodyChars);
  }
  if (value == null || typeof value === "number" || typeof value === "boolean") {
    return value;
  }
  if (Array.isArray(value)) {
    if (!redaction.includeBodies && key === "messages") {
      return `[REDACTED_MESSAGES:${value.length}]`;
    }
    return value.map((entry) => sanitizeForLogging(entry, redaction));
  }
  if (typeof value === "object") {
    return sanitizeObjectEntries(Object.entries(value), redaction);
  }
  // Functions, symbols, bigints: log their string form.
  return String(value);
}
161
/**
 * Render a structured log event as one human-readable line:
 * "[agent-sdk][component][event] message key=value ...".
 * String fields are skipped when falsy; numeric fields are skipped only
 * when undefined (so 0 still prints).
 */
function formatSDKLog(event) {
  const head = `[agent-sdk][${event.component}][${event.event}]`;
  const parts = [];
  for (const [label, field] of [
    ["provider", "provider"],
    ["model", "model"],
    ["sessionId", "sessionId"]
  ]) {
    if (event[field]) parts.push(`${label}=${event[field]}`);
  }
  for (const field of ["iteration", "statusCode", "durationMs"]) {
    if (event[field] !== void 0) parts.push(`${field}=${event[field]}`);
  }
  for (const [label, field] of [
    ["tool", "toolName"],
    ["requestId", "requestId"],
    ["clientRequestId", "clientRequestId"]
  ]) {
    if (event[field]) parts.push(`${label}=${event[field]}`);
  }
  const tail = parts.length > 0 ? ` ${parts.join(" ")}` : "";
  return event.message ? `${head} ${event.message}${tail}` : `${head}${tail}`;
}
176
/**
 * Map a log level to the matching bound console method; unknown levels
 * fall back to console.debug.
 */
function consoleMethod(level) {
  switch (level) {
    case "error":
      return console.error.bind(console);
    case "warn":
      return console.warn.bind(console);
    case "info":
      return console.info.bind(console);
    default:
      return console.debug.bind(console);
  }
}
182
/**
 * Build an SDK logger backed by the global console. Each event is
 * rendered via formatSDKLog; metadata, when present, is passed as a
 * second console argument so it stays inspectable.
 */
function createConsoleSDKLogger() {
  const emit = (level) => (event) => {
    const line = formatSDKLog(event);
    const log = consoleMethod(level);
    if (event.metadata != null) {
      log(line, event.metadata);
    } else {
      log(line);
    }
  };
  return {
    debug: emit("debug"),
    info: emit("info"),
    warn: emit("warn"),
    error: emit("error")
  };
}
207
/**
 * Emit a structured SDK log event if the effective level allows it.
 * Falls back to a console logger when none is configured, and tags every
 * payload with `source: "agent-sdk"`.
 */
function emitSDKLog(args) {
  const hasLogger = args.logger != null;
  if (!shouldEmitLog(args.logLevel, hasLogger, args.level)) {
    return;
  }
  const sink = args.logger ?? createConsoleSDKLogger();
  // Optional call: a partial logger may omit some level methods.
  sink[args.level]?.({ source: "agent-sdk", ...args.event });
}
218
/**
 * Pull a provider-assigned request id from response headers, checking the
 * common header names in priority order. Returns undefined when headers
 * are missing or none of the names is present.
 */
function extractProviderRequestId(headers) {
  if (headers == null) {
    return void 0;
  }
  for (const name of ["x-request-id", "request-id", "x-amzn-requestid"]) {
    const value = headers.get(name);
    if (value != null) {
      return value;
    }
  }
  return void 0;
}
224
+
225
+ // src/models/model-request-log.ts
226
/**
 * Count the `messages` array on a request body; undefined when the body
 * is not an object or has no array-valued `messages` property.
 */
function countMessages(body) {
  if (body == null || typeof body !== "object") {
    return void 0;
  }
  const { messages } = body;
  return Array.isArray(messages) ? messages.length : void 0;
}
233
/**
 * Count the `tools` array on a request body; undefined when the body is
 * not an object or has no array-valued `tools` property.
 */
function countTools(body) {
  if (body == null || typeof body !== "object") {
    return void 0;
  }
  const { tools } = body;
  return Array.isArray(tools) ? tools.length : void 0;
}
240
/**
 * Summarize a request body for logging: message/tool counts when present,
 * plus the sanitized body itself only when body logging is enabled.
 */
function buildRequestMetadata(body, params) {
  const redaction = resolveLogRedaction(params?.redaction);
  const metadata = {};
  const messageCount = countMessages(body);
  if (messageCount !== void 0) {
    metadata.messageCount = messageCount;
  }
  const toolCount = countTools(body);
  if (toolCount !== void 0) {
    metadata.toolCount = toolCount;
  }
  if (redaction.includeBodies) {
    metadata.requestBody = sanitizeForLogging(body, redaction);
  }
  return metadata;
}
252
/**
 * Log the start of a model HTTP request and create per-request state
 * (client request id + start timestamp) to correlate with the matching
 * end/failure log entry.
 */
function logModelRequestStart(context, body, extraMetadata) {
  const state = {
    clientRequestId: crypto.randomUUID(),
    startedAt: Date.now()
  };
  const { params } = context;
  emitSDKLog({
    logger: params?.logger,
    logLevel: params?.logLevel,
    level: "info",
    event: {
      component: "model",
      event: "model.request.start",
      message: `Starting ${context.operation} request`,
      provider: context.provider,
      model: context.model,
      operation: context.operation,
      sessionId: params?.sessionId,
      iteration: context.iteration,
      clientRequestId: state.clientRequestId,
      metadata: {
        path: context.path,
        ...buildRequestMetadata(body, params),
        ...extraMetadata
      }
    }
  });
  return state;
}
280
/**
 * Log completion of a model HTTP request. Non-2xx responses are logged
 * at warn level as "model.request.error". Includes the provider request
 * id (from response headers), status code, and wall-clock duration.
 */
function logModelRequestEnd(context, state, response, extraMetadata) {
  const { params } = context;
  const succeeded = response.ok;
  emitSDKLog({
    logger: params?.logger,
    logLevel: params?.logLevel,
    level: succeeded ? "info" : "warn",
    event: {
      component: "model",
      event: succeeded ? "model.request.end" : "model.request.error",
      message: succeeded ? "Model request completed" : "Model request returned error response",
      provider: context.provider,
      model: context.model,
      operation: context.operation,
      sessionId: params?.sessionId,
      iteration: context.iteration,
      clientRequestId: state.clientRequestId,
      requestId: extractProviderRequestId(response.headers),
      statusCode: response.status,
      durationMs: Date.now() - state.startedAt,
      metadata: {
        path: context.path,
        ...extraMetadata
      }
    }
  });
}
305
/**
 * Log a thrown failure of a model HTTP request. Aborts (AbortError) are
 * logged at info level as "model.request.aborted"; everything else at
 * error level. Non-Error throwables are wrapped for consistent fields.
 */
function logModelRequestFailure(context, state, error, extraMetadata) {
  const err = error instanceof Error ? error : new Error(String(error));
  const aborted = err.name === "AbortError";
  const { params } = context;
  emitSDKLog({
    logger: params?.logger,
    logLevel: params?.logLevel,
    level: aborted ? "info" : "error",
    event: {
      component: "model",
      event: aborted ? "model.request.aborted" : "model.request.error",
      message: aborted ? "Model request aborted" : "Model request failed",
      provider: context.provider,
      model: context.model,
      operation: context.operation,
      sessionId: params?.sessionId,
      iteration: context.iteration,
      clientRequestId: state.clientRequestId,
      durationMs: Date.now() - state.startedAt,
      errorName: err.name,
      errorMessage: err.message,
      metadata: {
        path: context.path,
        ...extraMetadata
      }
    }
  });
}
331
/**
 * Log a failure to parse one streaming chunk. The offending raw chunk is
 * sanitized/truncated before logging so bodies never leak.
 */
function logModelStreamParseError(context, rawChunk, error) {
  const err = error instanceof Error ? error : new Error(String(error));
  const { params } = context;
  const redaction = resolveLogRedaction(params?.redaction);
  emitSDKLog({
    logger: params?.logger,
    logLevel: params?.logLevel,
    level: "warn",
    event: {
      component: "streaming",
      event: "model.stream.parse_error",
      message: "Failed to parse provider stream chunk",
      provider: context.provider,
      model: context.model,
      operation: context.operation,
      sessionId: params?.sessionId,
      iteration: context.iteration,
      errorName: err.name,
      errorMessage: err.message,
      metadata: {
        path: context.path,
        rawChunk: sanitizeForLogging(rawChunk, redaction)
      }
    }
  });
}
356
+
357
+ // src/models/openai.ts
358
/**
 * Model adapter for the OpenAI Chat Completions API (or any
 * OpenAI-compatible endpoint via `baseUrl`). Extends BaseModelAdapter
 * with SSE streaming (including incremental tool-call assembly),
 * non-streaming completion, and structured request/response logging.
 */
var OpenAIAdapter = class extends chunkOZO7D77N_cjs.BaseModelAdapter {
  // Adapter identity, e.g. "openai/gpt-4o".
  name;
  // Bearer token; falls back to OPENAI_API_KEY.
  apiKey;
  // API root; falls back to OPENAI_BASE_URL, then the public endpoint.
  baseUrl;
  model;
  // Optional OpenAI-Organization header value.
  organization;
  /**
   * @param {object} [config] - Optional apiKey/baseUrl/model/organization/
   *   capabilities overrides; environment variables are the fallback.
   * @throws {Error} When no API key is provided or found in the env.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.OPENAI_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.OPENAI_BASE_URL || "https://api.openai.com/v1";
    this.model = config.model || "gpt-4o";
    this.organization = config.organization || process.env.OPENAI_ORG_ID;
    if (!this.apiKey) {
      throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `openai/${this.model}`;
    this.capabilities = config.capabilities ?? DEFAULT_ADAPTER_CAPABILITIES;
  }
  /**
   * Stream a chat completion. Parses the SSE response line-by-line and
   * yields normalized events: `text`, `tool_call_start`,
   * `tool_call_delta`, `tool_call`, `metadata` (usage), and a final
   * `done`. Tool-call argument fragments are accumulated in
   * `currentToolCall` until the call completes.
   * @throws {Error} On a non-2xx response or missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/chat/completions", body, "stream", params);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // In-progress tool call being assembled from streamed fragments.
    let currentToolCall = null;
    try {
      while (true) {
        // Cooperative cancellation check between reads.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // The last element may be a partial line; keep it for next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed || trimmed === "data: [DONE]") continue;
          if (!trimmed.startsWith("data: ")) continue;
          try {
            const data = JSON.parse(trimmed.slice(6));
            const choice = data.choices?.[0];
            if (!choice) continue;
            // Attach the raw provider payload only when requested.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            if (choice.delta?.content) {
              yield { type: "text", content: choice.delta.content, ...raw };
            }
            if (choice.delta?.tool_calls) {
              for (const toolCall of choice.delta.tool_calls) {
                if (toolCall.index !== void 0) {
                  if (toolCall.id && toolCall.function?.name) {
                    // A new tool call starts: flush the previous one first.
                    if (currentToolCall) {
                      yield {
                        type: "tool_call",
                        toolCall: {
                          id: currentToolCall.id,
                          name: currentToolCall.name,
                          arguments: this.safeParseJSON(currentToolCall.arguments)
                        },
                        ...raw
                      };
                    }
                    currentToolCall = {
                      id: toolCall.id,
                      name: toolCall.function.name,
                      arguments: toolCall.function.arguments || ""
                    };
                    yield {
                      type: "tool_call_start",
                      content: toolCall.function.name,
                      toolCallId: toolCall.id,
                      ...raw
                    };
                  } else if (toolCall.function?.arguments && currentToolCall) {
                    // Argument fragment for the in-progress tool call.
                    currentToolCall.arguments += toolCall.function.arguments;
                    yield {
                      type: "tool_call_delta",
                      content: toolCall.function.arguments,
                      toolCallId: currentToolCall.id,
                      ...raw
                    };
                  }
                }
              }
            }
            // Provider signalled end of tool calls: flush the current one.
            if (choice.finish_reason === "tool_calls" && currentToolCall) {
              yield {
                type: "tool_call",
                toolCall: {
                  id: currentToolCall.id,
                  name: currentToolCall.name,
                  arguments: this.safeParseJSON(currentToolCall.arguments)
                },
                ...raw
              };
              currentToolCall = null;
            }
            if (data.usage) {
              yield {
                type: "metadata",
                usagePhase: "output",
                metadata: {
                  usage: {
                    promptTokens: data.usage.prompt_tokens,
                    completionTokens: data.usage.completion_tokens,
                    totalTokens: data.usage.total_tokens
                  }
                },
                ...raw
              };
            }
          } catch (error) {
            // Malformed chunk: log it and continue with the next line.
            logModelStreamParseError(
              {
                provider: "openai",
                model: this.model,
                path: "/chat/completions",
                operation: "stream",
                params
              },
              trimmed,
              error
            );
          }
        }
      }
      // Flush a tool call left unterminated when the stream ended.
      if (currentToolCall) {
        yield {
          type: "tool_call",
          toolCall: {
            id: currentToolCall.id,
            name: currentToolCall.name,
            arguments: this.safeParseJSON(currentToolCall.arguments)
          },
          ...params.includeRawStreamEvents ? { providerRaw: { trailing: true } } : {}
        };
      }
      yield { type: "done" };
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Non-streaming chat completion. Returns `{ content, toolCalls?, usage? }`.
   * @throws {Error} On a non-2xx response or an empty choices array.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/chat/completions", body, "complete", params);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`OpenAI API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const choice = data.choices?.[0];
    if (!choice) {
      throw new Error("No completion choice returned");
    }
    const result = {
      content: choice.message?.content || ""
    };
    if (choice.message?.tool_calls) {
      result.toolCalls = choice.message.tool_calls.map((tc) => ({
        id: tc.id,
        name: tc.function.name,
        arguments: this.safeParseJSON(tc.function.arguments)
      }));
    }
    if (data.usage) {
      result.usage = {
        promptTokens: data.usage.prompt_tokens,
        completionTokens: data.usage.completion_tokens,
        totalTokens: data.usage.total_tokens
      };
    }
    return result;
  }
  /**
   * Build the Chat Completions request payload. Streaming requests also
   * opt into usage reporting via `stream_options`.
   */
  buildRequestBody(params, stream) {
    const messages = this.transformMessages(params.messages);
    const defaultMaxTokens = this.capabilities?.maxOutputTokens ?? DEFAULT_ADAPTER_CAPABILITIES.maxOutputTokens;
    const body = {
      model: this.model,
      messages,
      stream,
      ...stream && { stream_options: { include_usage: true } },
      ...params.temperature !== void 0 && { temperature: params.temperature },
      max_tokens: params.maxTokens ?? defaultMaxTokens,
      ...params.stopSequences && { stop: params.stopSequences }
    };
    if (params.tools && params.tools.length > 0) {
      body.tools = chunkOZO7D77N_cjs.toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  /**
   * POST `body` to `${baseUrl}${path}` with auth headers, emitting
   * start/end/failure log entries around the request. Returns the raw
   * Response (callers check `ok` themselves); rethrows transport errors.
   */
  async fetch(path, body, operation, params) {
    const requestLog = logModelRequestStart({
      provider: "openai",
      model: this.model,
      path,
      operation,
      params
    }, body);
    const headers = {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${this.apiKey}`,
      "X-Client-Request-Id": requestLog.clientRequestId
    };
    if (this.organization) {
      headers["OpenAI-Organization"] = this.organization;
    }
    try {
      const response = await globalThis.fetch(`${this.baseUrl}${path}`, {
        method: "POST",
        headers,
        body: JSON.stringify(body),
        signal: params.signal
      });
      logModelRequestEnd(
        {
          provider: "openai",
          model: this.model,
          path,
          operation,
          params
        },
        requestLog,
        response
      );
      return response;
    } catch (error) {
      logModelRequestFailure(
        {
          provider: "openai",
          model: this.model,
          path,
          operation,
          params
        },
        requestLog,
        error
      );
      throw error;
    }
  }
  /**
   * Parse a JSON string, returning the original string when parsing
   * fails (partial/malformed tool-call arguments surface as-is).
   */
  safeParseJSON(str) {
    try {
      return JSON.parse(str);
    } catch {
      return str;
    }
  }
};
617
/**
 * Factory for an OpenAI chat-completions adapter.
 * @param {object} [config] - See OpenAIAdapter constructor options.
 * @returns {OpenAIAdapter}
 */
function createOpenAI(config) {
  return new OpenAIAdapter(config);
}
620
+
621
+ // src/models/anthropic.ts
622
// Default retry policy for transient fetch failures: 2 attempts total,
// exponential backoff starting at 200ms, capped at 2s.
var DEFAULT_FETCH_RETRY = {
  maxAttempts: 2,
  baseDelayMs: 200,
  maxDelayMs: 2e3
};
627
/**
 * Normalize user-supplied retry options against DEFAULT_FETCH_RETRY:
 * at least 1 attempt, a non-negative base delay, and a max delay no
 * smaller than the base delay.
 */
function normalizeFetchRetry(options) {
  if (options == null) {
    return { ...DEFAULT_FETCH_RETRY };
  }
  const defaults = DEFAULT_FETCH_RETRY;
  const maxAttempts = Math.max(1, Math.floor(options.maxAttempts ?? defaults.maxAttempts));
  const baseDelayMs = Math.max(0, options.baseDelayMs ?? defaults.baseDelayMs);
  const maxDelayMs = Math.max(baseDelayMs, options.maxDelayMs ?? defaults.maxDelayMs);
  return { maxAttempts, baseDelayMs, maxDelayMs };
}
636
/**
 * True when the value represents an abort: any non-null object (including
 * a DOMException) whose `name` is "AbortError".
 */
function isAbortError(e) {
  if (typeof e !== "object" || e === null) {
    return false;
  }
  // Covers both DOMException instances and plain error-shaped objects.
  return e.name === "AbortError";
}
642
/**
 * Classify an error as a transient network failure worth retrying:
 * fetch's TypeError, or common socket-level error codes on `cause`.
 * Aborts are never retriable.
 */
function isRetriableFetchError(e) {
  if (isAbortError(e)) {
    return false;
  }
  if (e instanceof TypeError) {
    return true;
  }
  const cause = typeof e === "object" && e !== null && "cause" in e ? e.cause : void 0;
  switch (cause?.code) {
    case "ECONNRESET":
    case "ETIMEDOUT":
    case "EPIPE":
    case "UND_ERR_SOCKET":
      return true;
    default:
      return false;
  }
}
653
/**
 * HTTP statuses that merit a retry: 429 (rate limited) and 502/503/504
 * (gateway/upstream failures).
 */
function isRetriableHttpStatus(status) {
  return [429, 502, 503, 504].includes(status);
}
656
/**
 * Convert a Retry-After header into milliseconds. Accepts delta-seconds
 * ("2" => 2000) or an HTTP date (clamped to >= 0 relative to now);
 * returns undefined for missing or unparseable values.
 */
function parseRetryAfterMs(header) {
  if (header == null || header === "") {
    return void 0;
  }
  const text = header.trim();
  const seconds = Number(text);
  if (Number.isFinite(seconds) && seconds >= 0) {
    return seconds * 1e3;
  }
  const dateMs = Date.parse(text);
  if (Number.isNaN(dateMs)) {
    return void 0;
  }
  const remaining = dateMs - Date.now();
  return remaining > 0 ? remaining : 0;
}
672
/**
 * Exponential backoff with 50-100% jitter: min(cap, base * 2^attempt),
 * scaled by a random factor in [0.5, 1), floored, then capped again.
 */
function computeBackoffMs(attemptIndex, baseDelayMs, maxDelayMs) {
  const cappedExponential = Math.min(maxDelayMs, baseDelayMs * 2 ** attemptIndex);
  const jitterFactor = 0.5 + Math.random() * 0.5;
  return Math.min(maxDelayMs, Math.floor(cappedExponential * jitterFactor));
}
677
/**
 * Abortable sleep. Resolves after `ms` milliseconds; rejects with an
 * AbortError DOMException when the signal fires first (or is already
 * aborted). Zero or negative delays resolve immediately.
 */
async function delay(ms, signal) {
  if (ms <= 0) {
    return;
  }
  return new Promise((resolve, reject) => {
    const abortError = () => new DOMException("The operation was aborted.", "AbortError");
    if (signal?.aborted) {
      reject(abortError());
      return;
    }
    const onAbort = () => {
      clearTimeout(timer);
      reject(abortError());
    };
    const timer = setTimeout(() => {
      // Detach the listener so the signal cannot leak this closure.
      signal?.removeEventListener("abort", onAbort);
      resolve();
    }, ms);
    signal?.addEventListener("abort", onAbort, { once: true });
  });
}
699
/**
 * Consume and discard a response body so the underlying connection can
 * be reused; errors while draining are deliberately ignored because this
 * is best-effort cleanup, not a correctness step.
 */
async function drainResponseBody(response) {
  try {
    await response.arrayBuffer();
  } catch {
    // Ignored: a failed drain only forfeits connection reuse.
  }
}
705
+ var AnthropicAdapter = class extends chunkOZO7D77N_cjs.BaseModelAdapter {
706
+ name;
707
+ apiKey;
708
+ baseUrl;
709
+ model;
710
+ version;
711
+ requestMetadata;
712
+ fetchRetry;
713
  /**
   * @param {object} [config] - Optional apiKey/baseUrl/model/version/
   *   metadata/fetchRetry/capabilities overrides; ANTHROPIC_* environment
   *   variables are the fallback for key and base URL.
   * @throws {Error} When no API key is provided or found in the env.
   */
  constructor(config = {}) {
    super();
    this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || "";
    this.baseUrl = config.baseUrl || process.env.ANTHROPIC_BASE_URL || "https://api.anthropic.com";
    this.model = config.model || "claude-sonnet-4-20250514";
    // Value sent as the anthropic-version API header.
    this.version = config.version || "2023-06-01";
    // Static object or factory used to build request `metadata`.
    this.requestMetadata = config.metadata;
    this.fetchRetry = normalizeFetchRetry(config.fetchRetry);
    if (!this.apiKey) {
      throw new Error("Anthropic API key is required. Set ANTHROPIC_API_KEY environment variable or pass apiKey in config.");
    }
    this.name = `anthropic/${this.model}`;
    this.capabilities = config.capabilities ?? DEFAULT_ADAPTER_CAPABILITIES;
  }
727
  /**
   * Stream a message via the Anthropic Messages API (SSE). Yields
   * normalized events: `text`, `thinking`, `thinking_block_end`,
   * `tool_call_start`/`tool_call_delta`/`tool_call`, `metadata` (usage in
   * two phases: input at message_start, output at message_delta), and a
   * final `done`. Tool-use input JSON fragments are accumulated in
   * `currentToolCall` until the content block closes.
   * @throws {Error} On a non-2xx response or missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/v1/messages", body, "stream", params);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Anthropic API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // In-progress tool_use block assembled from input_json_delta events.
    let currentToolCall = null;
    // Open thinking block; carries the signature for thinking deltas.
    let currentThinkingBlock = null;
    try {
      while (true) {
        // Cooperative cancellation check between reads.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split("\n");
        // The last element may be a partial line; keep it for next read.
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed || !trimmed.startsWith("data:")) continue;
          // Accept both "data: {...}" and "data:{...}" forms.
          let jsonStart = 5;
          if (trimmed.length > 5 && trimmed[5] === " ") {
            jsonStart = 6;
          }
          const jsonStr = trimmed.slice(jsonStart);
          try {
            const data = JSON.parse(jsonStr);
            // Attach the raw provider payload only when requested.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            switch (data.type) {
              case "content_block_start":
                if (data.content_block?.type === "tool_use") {
                  currentToolCall = {
                    id: data.content_block.id,
                    name: data.content_block.name,
                    input: ""
                  };
                  yield {
                    type: "tool_call_start",
                    toolCall: {
                      id: data.content_block.id,
                      name: data.content_block.name,
                      arguments: {}
                    },
                    ...raw
                  };
                } else if (data.content_block?.type === "thinking") {
                  currentThinkingBlock = {
                    signature: data.content_block.signature
                  };
                  yield {
                    type: "thinking",
                    content: data.content_block.thinking,
                    signature: currentThinkingBlock.signature,
                    ...raw
                  };
                }
                break;
              case "content_block_delta":
                if (data.delta?.type === "text_delta") {
                  yield { type: "text", content: data.delta.text, ...raw };
                } else if (data.delta?.type === "thinking_delta") {
                  yield {
                    type: "thinking",
                    content: data.delta.thinking,
                    signature: currentThinkingBlock?.signature,
                    ...raw
                  };
                } else if (data.delta?.type === "input_json_delta" && currentToolCall) {
                  // Accumulate partial JSON for the tool call's input.
                  currentToolCall.input += data.delta.partial_json;
                  yield {
                    type: "tool_call_delta",
                    content: data.delta.partial_json,
                    toolCallId: currentToolCall.id,
                    ...raw
                  };
                }
                break;
              case "content_block_stop":
                // Flush whichever block (tool call / thinking) was open.
                if (currentToolCall) {
                  yield {
                    type: "tool_call",
                    toolCall: {
                      id: currentToolCall.id,
                      name: currentToolCall.name,
                      arguments: this.safeParseJSON(currentToolCall.input)
                    },
                    ...raw
                  };
                  currentToolCall = null;
                }
                if (currentThinkingBlock) {
                  yield { type: "thinking_block_end", ...raw };
                  currentThinkingBlock = null;
                }
                break;
              case "message_start":
                if (data.message?.usage) {
                  const usage = data.message.usage;
                  // Fold cache-read tokens into the prompt-token count so
                  // totals reflect the true input size.
                  const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
                  yield {
                    type: "metadata",
                    usagePhase: "input",
                    metadata: {
                      usage: {
                        promptTokens: actualInputTokens,
                        completionTokens: 0,
                        totalTokens: actualInputTokens,
                        // Pass through prompt-cache accounting.
                        cacheReadTokens: usage.cache_read_input_tokens || 0,
                        cacheWriteTokens: usage.cache_creation_input_tokens || 0
                      }
                    },
                    ...raw
                  };
                }
                break;
              case "message_delta":
                if (data.usage) {
                  yield {
                    type: "metadata",
                    usagePhase: "output",
                    metadata: {
                      usage: {
                        promptTokens: 0,
                        completionTokens: data.usage.output_tokens,
                        totalTokens: data.usage.output_tokens
                      }
                    },
                    ...raw
                  };
                }
                break;
            }
          } catch (error) {
            // Malformed chunk: log it and continue with the next line.
            logModelStreamParseError(
              {
                provider: "anthropic",
                model: this.model,
                path: "/v1/messages",
                operation: "stream",
                params
              },
              jsonStr,
              error
            );
          }
        }
      }
      yield { type: "done" };
    } finally {
      reader.releaseLock();
    }
  }
889
+ async complete(params) {
890
+ const body = this.buildRequestBody(params, false);
891
+ const response = await this.fetch("/v1/messages", body, "complete", params);
892
+ if (!response.ok) {
893
+ const error = await response.text();
894
+ throw new Error(`Anthropic API error: ${response.status} - ${error}`);
895
+ }
896
+ const data = await response.json();
897
+ const result = {
898
+ content: ""
899
+ };
900
+ const toolCalls = [];
901
+ for (const block of data.content || []) {
902
+ if (block.type === "text") {
903
+ result.content += block.text;
904
+ } else if (block.type === "tool_use") {
905
+ toolCalls.push({
906
+ id: block.id,
907
+ name: block.name,
908
+ arguments: block.input
909
+ });
910
+ }
911
+ }
912
+ if (toolCalls.length > 0) {
913
+ result.toolCalls = toolCalls;
914
+ }
915
+ if (data.usage) {
916
+ const usage = data.usage;
917
+ const actualInputTokens = usage.input_tokens + (usage.cache_read_input_tokens || 0);
918
+ result.usage = {
919
+ promptTokens: actualInputTokens,
920
+ completionTokens: usage.output_tokens,
921
+ totalTokens: actualInputTokens + usage.output_tokens
922
+ };
923
+ }
924
+ return result;
925
+ }
926
+ buildRequestBody(params, stream) {
927
+ const { system, messages } = this.extractSystemMessage(params.messages);
928
+ const transformedMessages = this.transformAnthropicMessages(messages);
929
+ const defaultMaxTokens = this.capabilities?.maxOutputTokens ?? DEFAULT_ADAPTER_CAPABILITIES.maxOutputTokens;
930
+ const body = {
931
+ model: this.model,
932
+ max_tokens: params.maxTokens ?? defaultMaxTokens,
933
+ messages: transformedMessages,
934
+ stream,
935
+ ...system && { system },
936
+ ...params.temperature !== void 0 && { temperature: params.temperature }
937
+ };
938
+ if (params.tools && params.tools.length > 0) {
939
+ body.tools = chunkOZO7D77N_cjs.toolsToModelSchema(params.tools).map((tool) => ({
940
+ name: tool.name,
941
+ description: tool.description,
942
+ input_schema: tool.parameters
943
+ }));
944
+ }
945
+ const mergedMetadata = this.mergeAnthropicMetadata(params);
946
+ if (mergedMetadata && Object.keys(mergedMetadata).length > 0) {
947
+ body.metadata = mergedMetadata;
948
+ }
949
+ return body;
950
+ }
951
+ /**
952
+ * Build Messages API `metadata`: `sessionId` → `user_id`, merged with resolved adapter `metadata` (dict or fn).
953
+ * Config `metadata` keys override `user_id` when duplicated.
954
+ */
955
+ mergeAnthropicMetadata(params) {
956
+ const extra = this.resolveMetadataExtra(params);
957
+ const hasSession = params.sessionId !== void 0 && params.sessionId !== "";
958
+ if (!hasSession && extra === void 0) {
959
+ return void 0;
960
+ }
961
+ const merged = {};
962
+ if (hasSession) {
963
+ merged.user_id = params.sessionId;
964
+ }
965
+ if (extra !== void 0) {
966
+ Object.assign(merged, extra);
967
+ }
968
+ return Object.keys(merged).length > 0 ? merged : void 0;
969
+ }
970
+ resolveMetadataExtra(params) {
971
+ const raw = this.requestMetadata;
972
+ if (raw == null) {
973
+ return void 0;
974
+ }
975
+ if (typeof raw === "function") {
976
+ const v = raw(params);
977
+ if (typeof v !== "object" || v === null || Array.isArray(v) || Object.keys(v).length === 0) {
978
+ return void 0;
979
+ }
980
+ return { ...v };
981
+ }
982
+ if (typeof raw === "object" && !Array.isArray(raw) && Object.keys(raw).length > 0) {
983
+ return { ...raw };
984
+ }
985
+ return void 0;
986
+ }
987
+ extractSystemMessage(messages) {
988
+ const systemMessages = messages.filter((m) => m.role === "system");
989
+ const otherMessages = messages.filter((m) => m.role !== "system");
990
+ const combinedSystem = systemMessages.length > 0 ? systemMessages.map((m) => m.content).join("\n\n") : void 0;
991
+ return {
992
+ system: combinedSystem,
993
+ messages: otherMessages
994
+ };
995
+ }
996
+ transformAnthropicMessages(messages) {
997
+ return messages.map((msg) => {
998
+ const transformed = {
999
+ role: msg.role === "assistant" ? "assistant" : "user",
1000
+ content: []
1001
+ };
1002
+ if (typeof msg.content === "string") {
1003
+ transformed.content = [{ type: "text", text: msg.content }];
1004
+ } else if (Array.isArray(msg.content)) {
1005
+ const contentParts = [];
1006
+ for (const part of msg.content) {
1007
+ if (part.type === "thinking") {
1008
+ contentParts.push(part);
1009
+ } else if (part.type === "text") {
1010
+ contentParts.push({ type: "text", text: part.text });
1011
+ } else {
1012
+ contentParts.push(part);
1013
+ }
1014
+ }
1015
+ transformed.content = contentParts;
1016
+ if (contentParts.length === 0) {
1017
+ transformed.content = "";
1018
+ }
1019
+ }
1020
+ if (msg.toolCalls && msg.role === "assistant") {
1021
+ for (const tc of msg.toolCalls) {
1022
+ transformed.content.push({
1023
+ type: "tool_use",
1024
+ id: tc.id,
1025
+ name: tc.name,
1026
+ input: tc.arguments
1027
+ });
1028
+ }
1029
+ }
1030
+ if (msg.role === "tool" && msg.toolCallId) {
1031
+ transformed.role = "user";
1032
+ transformed.content = [{
1033
+ type: "tool_result",
1034
+ tool_use_id: msg.toolCallId,
1035
+ content: msg.content
1036
+ }];
1037
+ }
1038
+ return transformed;
1039
+ });
1040
+ }
1041
+ /**
1042
+ * 发起 POST;按 `fetchRetry` 对网络错误与 429/502/503/504 重试(不含响应体已开始消费后的 SSE 读失败)。
1043
+ */
1044
+ async fetch(path, body, operation, params) {
1045
+ const requestLog = logModelRequestStart(
1046
+ {
1047
+ provider: "anthropic",
1048
+ model: this.model,
1049
+ path,
1050
+ operation,
1051
+ params
1052
+ },
1053
+ body,
1054
+ { httpMaxAttempts: this.fetchRetry.maxAttempts }
1055
+ );
1056
+ const url = `${this.baseUrl}${path}`;
1057
+ const init = {
1058
+ method: "POST",
1059
+ headers: {
1060
+ "Content-Type": "application/json",
1061
+ "x-api-key": this.apiKey,
1062
+ "anthropic-version": this.version
1063
+ },
1064
+ body: JSON.stringify(body),
1065
+ signal: params.signal
1066
+ };
1067
+ for (let attempt = 0; attempt < this.fetchRetry.maxAttempts; attempt++) {
1068
+ const httpAttemptMeta = {
1069
+ httpAttempt: attempt + 1,
1070
+ httpMaxAttempts: this.fetchRetry.maxAttempts
1071
+ };
1072
+ if (params.signal?.aborted) {
1073
+ logModelRequestFailure(
1074
+ {
1075
+ provider: "anthropic",
1076
+ model: this.model,
1077
+ path,
1078
+ operation,
1079
+ params
1080
+ },
1081
+ requestLog,
1082
+ new DOMException("The operation was aborted.", "AbortError"),
1083
+ { httpMaxAttempts: this.fetchRetry.maxAttempts }
1084
+ );
1085
+ throw new DOMException("The operation was aborted.", "AbortError");
1086
+ }
1087
+ try {
1088
+ const response = await globalThis.fetch(url, init);
1089
+ if (response.ok) {
1090
+ logModelRequestEnd(
1091
+ {
1092
+ provider: "anthropic",
1093
+ model: this.model,
1094
+ path,
1095
+ operation,
1096
+ params
1097
+ },
1098
+ requestLog,
1099
+ response,
1100
+ httpAttemptMeta
1101
+ );
1102
+ return response;
1103
+ }
1104
+ const canRetryHttp = attempt < this.fetchRetry.maxAttempts - 1 && isRetriableHttpStatus(response.status);
1105
+ if (canRetryHttp) {
1106
+ await drainResponseBody(response);
1107
+ const fromHeader = parseRetryAfterMs(response.headers.get("Retry-After"));
1108
+ const backoff = computeBackoffMs(attempt, this.fetchRetry.baseDelayMs, this.fetchRetry.maxDelayMs);
1109
+ const waitMs = fromHeader != null ? Math.min(fromHeader, this.fetchRetry.maxDelayMs) : backoff;
1110
+ await delay(waitMs, params.signal);
1111
+ continue;
1112
+ }
1113
+ logModelRequestEnd(
1114
+ {
1115
+ provider: "anthropic",
1116
+ model: this.model,
1117
+ path,
1118
+ operation,
1119
+ params
1120
+ },
1121
+ requestLog,
1122
+ response,
1123
+ httpAttemptMeta
1124
+ );
1125
+ return response;
1126
+ } catch (e) {
1127
+ if (isAbortError(e) || params.signal?.aborted) {
1128
+ logModelRequestFailure(
1129
+ {
1130
+ provider: "anthropic",
1131
+ model: this.model,
1132
+ path,
1133
+ operation,
1134
+ params
1135
+ },
1136
+ requestLog,
1137
+ e,
1138
+ httpAttemptMeta
1139
+ );
1140
+ throw e;
1141
+ }
1142
+ if (attempt < this.fetchRetry.maxAttempts - 1 && isRetriableFetchError(e)) {
1143
+ const backoff = computeBackoffMs(attempt, this.fetchRetry.baseDelayMs, this.fetchRetry.maxDelayMs);
1144
+ await delay(backoff, params.signal);
1145
+ continue;
1146
+ }
1147
+ logModelRequestFailure(
1148
+ {
1149
+ provider: "anthropic",
1150
+ model: this.model,
1151
+ path,
1152
+ operation,
1153
+ params
1154
+ },
1155
+ requestLog,
1156
+ e,
1157
+ httpAttemptMeta
1158
+ );
1159
+ throw e;
1160
+ }
1161
+ }
1162
+ throw new Error("Anthropic fetch: unexpected retry loop exit");
1163
+ }
1164
+ safeParseJSON(str) {
1165
+ try {
1166
+ return JSON.parse(str);
1167
+ } catch {
1168
+ return str;
1169
+ }
1170
+ }
1171
+ };
1172
/**
 * Factory helper: build an Anthropic Messages API adapter.
 * @param {object} config - Forwarded verbatim to the AnthropicAdapter constructor.
 * @returns {AnthropicAdapter} A freshly constructed adapter instance.
 */
function createAnthropic(config) {
  const adapter = new AnthropicAdapter(config);
  return adapter;
}
1175
+
1176
+ // src/models/ollama.ts
1177
/**
 * Convert one parsed Ollama /api/chat NDJSON object into zero or more stream chunks.
 *
 * Emits, in order: a "thinking" chunk (when `message.thinking` is a non-empty
 * string), a "text" chunk (when `message.content` is a non-empty string), and one
 * "tool_call" chunk per entry of `message.tool_calls`.
 *
 * @param {object} data - One decoded line of the Ollama chat stream.
 * @param {(args: unknown) => object} parseToolArguments - Normalizes tool-call arguments into an object.
 * @param {() => string} nextToolCallId - Supplies a fresh id for each tool call (Ollama does not send ids).
 * @returns {object[]} Stream chunks; empty when `data.message` is absent.
 */
function ollamaStreamChunksFromChatData(data, parseToolArguments, nextToolCallId) {
  const message = data.message;
  if (!message) return [];
  const out = [];
  if (typeof message.thinking === "string" && message.thinking.length > 0) {
    out.push({ type: "thinking", content: message.thinking });
  }
  if (typeof message.content === "string" && message.content.length > 0) {
    out.push({ type: "text", content: message.content });
  }
  if (Array.isArray(message.tool_calls)) {
    for (const call of message.tool_calls) {
      const fn = call.function;
      // Ollama omits ids on tool calls, so one is minted per call.
      out.push({
        type: "tool_call",
        toolCall: {
          id: nextToolCallId(),
          name: typeof fn?.name === "string" ? fn.name : "",
          arguments: parseToolArguments(fn?.arguments)
        }
      });
    }
  }
  return out;
}
1206
/**
 * Flatten message content into the plain string Ollama's API expects.
 *
 * Strings pass through untouched. Arrays keep only `{ type: "text" }` parts,
 * joined with blank lines. Anything else (null, objects, numbers) becomes "".
 *
 * @param {string | Array<{type: string, text?: string}> | unknown} content
 * @returns {string}
 */
function ollamaMessageContentToApiString(content) {
  if (typeof content === "string") return content;
  if (!Array.isArray(content)) return "";
  return content
    .filter((part) => part.type === "text")
    .map((part) => part.text)
    .join("\n\n");
}
1217
/**
 * Mint a collision-resistant id for a tool call within one response batch.
 * Shape: `ollama_<batchMs>_<index>_<random base36 suffix>`.
 *
 * @param {number} batchMs - Timestamp shared by all tool calls of one response.
 * @param {number} index - Position of the tool call within the batch.
 * @returns {string}
 */
function uniqueOllamaToolCallId(batchMs, index) {
  const suffix = Math.random().toString(36).slice(2, 11);
  return ["ollama", batchMs, index, suffix].join("_");
}
1220
/**
 * Model adapter for an Ollama server speaking the `/api/chat` protocol
 * (newline-delimited JSON when streaming). Supports text, "thinking" blocks,
 * and tool calling.
 */
var OllamaAdapter = class extends chunkOZO7D77N_cjs.BaseModelAdapter {
  // Display name of this adapter, `ollama/<model>`.
  name;
  // Server origin, e.g. `http://localhost:11434`.
  baseUrl;
  // Model tag sent to the API.
  model;
  // Optional flag forwarded verbatim as `think` in the request body.
  think;
  /**
   * @param {object} [config] - Optional settings: `baseUrl`, `model`, `think`, `capabilities`.
   * Precedence for the base URL: explicit config > OLLAMA_BASE_URL env > localhost default.
   */
  constructor(config = {}) {
    super();
    this.baseUrl = config.baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
    this.model = config.model || "qwen3.5:0.8b";
    this.think = config.think;
    this.name = `ollama/${this.model}`;
    this.capabilities = config.capabilities ?? DEFAULT_ADAPTER_CAPABILITIES;
  }
  /**
   * Stream a chat completion as an async generator of chunks
   * (thinking / text / tool_call / metadata / done).
   *
   * The response body is NDJSON: each full line is parsed independently; a
   * line that fails to parse is logged and skipped rather than aborting the
   * stream. Usage metadata is emitted before the final `done` chunk when the
   * server reports token counts.
   *
   * @param {object} params - Completion params (messages, tools, signal, includeRawStreamEvents, ...).
   * @throws {Error} On a non-OK HTTP response or a missing response body.
   */
  async *stream(params) {
    const body = this.buildRequestBody(params, true);
    const response = await this.fetch("/api/chat", body, "stream", params);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const reader = response.body?.getReader();
    if (!reader) {
      throw new Error("No response body");
    }
    const decoder = new TextDecoder();
    let buffer = "";
    // Ollama omits ids on streamed tool calls; mint one per call.
    const nextToolCallId = () => `ollama_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
    try {
      while (true) {
        // Cooperative cancellation: stop reading as soon as the caller aborts.
        if (params.signal?.aborted) {
          reader.cancel();
          break;
        }
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        // Process only complete lines; keep the trailing partial line buffered.
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed) continue;
          try {
            const data = JSON.parse(trimmed);
            // Optionally attach the raw provider event to every chunk.
            const raw = params.includeRawStreamEvents ? { providerRaw: data } : {};
            const messageChunks = ollamaStreamChunksFromChatData(
              data,
              (args) => this.parseToolArguments(args),
              nextToolCallId
            );
            for (const chunk of messageChunks) {
              yield { ...chunk, ...raw };
            }
            if (data.done) {
              // Final event: surface token usage (if reported), then signal done.
              if (data.prompt_eval_count || data.eval_count) {
                yield {
                  type: "metadata",
                  usagePhase: "output",
                  metadata: {
                    usage: {
                      promptTokens: data.prompt_eval_count || 0,
                      completionTokens: data.eval_count || 0,
                      totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
                    }
                  },
                  ...raw
                };
              }
              yield { type: "done", ...raw };
            }
          } catch (error) {
            // A malformed NDJSON line is logged and skipped; the stream continues.
            logModelStreamParseError(
              {
                provider: "ollama",
                model: this.model,
                path: "/api/chat",
                operation: "stream",
                params
              },
              trimmed,
              error
            );
          }
        }
      }
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Non-streaming chat completion.
   *
   * @param {object} params - Same shape as for `stream`.
   * @returns {Promise<object>} Result with `content`, plus optional `thinking`,
   *   `toolCalls` (ids minted locally, one timestamp per batch) and `usage`.
   * @throws {Error} On a non-OK HTTP response.
   */
  async complete(params) {
    const body = this.buildRequestBody(params, false);
    const response = await this.fetch("/api/chat", body, "complete", params);
    if (!response.ok) {
      const error = await response.text();
      throw new Error(`Ollama API error: ${response.status} - ${error}`);
    }
    const data = await response.json();
    const result = {
      content: data.message?.content || ""
    };
    const thinking = data.message?.thinking;
    if (typeof thinking === "string" && thinking.length > 0) {
      result.thinking = thinking;
    }
    if (data.message?.tool_calls) {
      // One shared timestamp keeps ids of the same batch visually grouped.
      const batchMs = Date.now();
      result.toolCalls = data.message.tool_calls.map((tc, index) => ({
        id: uniqueOllamaToolCallId(batchMs, index),
        name: tc.function?.name || "",
        arguments: this.parseToolArguments(tc.function?.arguments)
      }));
    }
    if (data.prompt_eval_count || data.eval_count) {
      result.usage = {
        promptTokens: data.prompt_eval_count || 0,
        completionTokens: data.eval_count || 0,
        totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
      };
    }
    return result;
  }
  /**
   * Normalize tool-call arguments into a plain object.
   * Objects pass through; JSON strings are parsed (non-object JSON is wrapped
   * as `{ value }`); anything else — including unparsable strings — yields `{}`.
   *
   * @param {unknown} args
   * @returns {object}
   */
  parseToolArguments(args) {
    if (args == null) return {};
    if (typeof args === "object" && !Array.isArray(args)) return args;
    if (typeof args === "string") {
      try {
        const parsed = JSON.parse(args);
        return typeof parsed === "object" && parsed !== null ? parsed : { value: parsed };
      } catch {
        return {};
      }
    }
    return {};
  }
  /**
   * Ollama requires `tool_calls.function.arguments` to be an object, not a JSON
   * string. Tool-result messages use `tool_name`
   * (see https://docs.ollama.com/capabilities/tool-calling ), not OpenAI's
   * `tool_call_id`; the name is recovered from the matching assistant tool call.
   *
   * @param {object[]} messages - Provider-agnostic chat messages.
   * @returns {object[]} Messages in Ollama's wire format.
   */
  transformMessages(messages) {
    // First pass: map tool-call ids to tool names so tool-result messages can
    // be tagged with `tool_name`.
    const toolCallIdToName = /* @__PURE__ */ new Map();
    for (const msg of messages) {
      if (msg.role === "assistant" && msg.toolCalls) {
        for (const tc of msg.toolCalls) {
          toolCallIdToName.set(tc.id, tc.name);
        }
      }
    }
    return messages.map((msg) => {
      if (msg.role === "tool" && msg.toolCallId) {
        const toolName = toolCallIdToName.get(msg.toolCallId) ?? msg.name;
        return {
          role: "tool",
          content: ollamaMessageContentToApiString(msg.content),
          ...toolName && { tool_name: toolName }
        };
      }
      return {
        role: msg.role,
        content: ollamaMessageContentToApiString(msg.content),
        ...msg.toolCalls && { tool_calls: msg.toolCalls.map((tc) => ({
          id: tc.id,
          type: "function",
          function: {
            name: tc.name,
            arguments: this.parseToolArguments(tc.arguments)
          }
        })) }
      };
    });
  }
  /**
   * Build the `/api/chat` request body.
   *
   * @param {object} params - Completion params.
   * @param {boolean} stream - Whether to request a streaming response.
   * @returns {object} JSON-serializable request body.
   */
  buildRequestBody(params, stream) {
    const defaultMaxTokens = this.capabilities?.maxOutputTokens ?? DEFAULT_ADAPTER_CAPABILITIES.maxOutputTokens;
    const options = {
      num_predict: params.maxTokens ?? defaultMaxTokens
    };
    if (params.temperature !== void 0) {
      options.temperature = params.temperature;
    }
    const body = {
      model: this.model,
      messages: this.transformMessages(params.messages),
      stream,
      options
    };
    if (this.think !== void 0) {
      body.think = this.think;
    }
    if (params.tools && params.tools.length > 0) {
      // Ollama expects OpenAI-style `{ type: "function", function: {...} }` wrappers.
      body.tools = chunkOZO7D77N_cjs.toolsToModelSchema(params.tools).map((tool) => ({
        type: "function",
        function: tool
      }));
    }
    return body;
  }
  /**
   * POST `body` to `${baseUrl}${path}` with request start/end/failure logging.
   * No retry logic here (unlike the Anthropic adapter); errors propagate to the
   * caller after being logged.
   *
   * @param {string} path - API path, e.g. "/api/chat".
   * @param {object} body - JSON-serializable request body.
   * @param {string} operation - "stream" | "complete", for log context.
   * @param {object} params - Completion params (provides the abort `signal`).
   * @returns {Promise<Response>}
   */
  async fetch(path, body, operation, params) {
    const requestLog = logModelRequestStart({
      provider: "ollama",
      model: this.model,
      path,
      operation,
      params
    }, body);
    try {
      const response = await globalThis.fetch(`${this.baseUrl}${path}`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json"
        },
        body: JSON.stringify(body),
        signal: params.signal
      });
      logModelRequestEnd(
        {
          provider: "ollama",
          model: this.model,
          path,
          operation,
          params
        },
        requestLog,
        response
      );
      return response;
    } catch (error) {
      logModelRequestFailure(
        {
          provider: "ollama",
          model: this.model,
          path,
          operation,
          params
        },
        requestLog,
        error
      );
      throw error;
    }
  }
};
1459
/**
 * Factory helper: build an Ollama chat adapter.
 * @param {object} [config] - Forwarded verbatim to the OllamaAdapter constructor.
 * @returns {OllamaAdapter} A freshly constructed adapter instance.
 */
function createOllama(config) {
  const adapter = new OllamaAdapter(config);
  return adapter;
}
1462
+
1463
+ // src/core/process-env-merge.ts
1464
/**
 * Snapshot `process.env` (string values only) and overlay optional overrides.
 * Override keys win on conflict. The result is a fresh plain object; the real
 * environment is never mutated.
 *
 * @param {Record<string, string>} [overrides] - Values layered on top of the environment.
 * @returns {Record<string, string>}
 */
function mergeProcessEnv(overrides) {
  const base = Object.fromEntries(
    Object.entries(process.env).filter(([, value]) => typeof value === "string")
  );
  return overrides ? { ...base, ...overrides } : base;
}
1473
/**
 * Build the environment for an MCP stdio server process.
 * Layering (later wins): process.env < agentEnv < serverEnv.
 *
 * @param {Record<string, string>} [agentEnv] - Agent-level overrides.
 * @param {Record<string, string>} [serverEnv] - Per-server overrides.
 * @returns {Record<string, string>}
 */
function mergeMcpStdioEnv(agentEnv, serverEnv) {
  const base = mergeProcessEnv(agentEnv);
  if (!serverEnv) return base;
  return { ...base, ...serverEnv };
}
1477
+
1478
+ // src/models/index.ts
1479
/**
 * Instantiate a model adapter from declarative config.
 *
 * API keys and base URLs fall back to the merged environment
 * (process.env overlaid with `agentEnv`): OPENAI_API_KEY / OPENAI_BASE_URL /
 * OPENAI_ORG_ID, ANTHROPIC_API_KEY / ANTHROPIC_BASE_URL, OLLAMA_BASE_URL.
 *
 * @param {object} modelConfig - `{ provider, model, apiKey?, baseUrl?, think? }`.
 * @param {Record<string, string>} [agentEnv] - Environment overrides layered over process.env.
 * @returns {OpenAIAdapter | AnthropicAdapter | OllamaAdapter}
 * @throws {Error} When `modelConfig.provider` is not a known provider.
 */
function createModel(modelConfig, agentEnv) {
  const env = mergeProcessEnv(agentEnv);
  switch (modelConfig.provider) {
    case "openai":
      return new OpenAIAdapter({
        apiKey: modelConfig.apiKey || env.OPENAI_API_KEY || "",
        baseUrl: modelConfig.baseUrl || env.OPENAI_BASE_URL,
        model: modelConfig.model,
        organization: env.OPENAI_ORG_ID
      });
    case "anthropic":
      return new AnthropicAdapter({
        apiKey: modelConfig.apiKey || env.ANTHROPIC_API_KEY || "",
        baseUrl: modelConfig.baseUrl || env.ANTHROPIC_BASE_URL,
        model: modelConfig.model
      });
    case "ollama":
      return new OllamaAdapter({
        baseUrl: modelConfig.baseUrl || env.OLLAMA_BASE_URL,
        model: modelConfig.model,
        think: modelConfig.think
      });
    default:
      throw new Error(`Unknown model provider: ${modelConfig.provider}`);
  }
}
1504
+
1505
// CommonJS export surface of this chunk.
exports.AnthropicAdapter = AnthropicAdapter;
exports.DEFAULT_ADAPTER_CAPABILITIES = DEFAULT_ADAPTER_CAPABILITIES;
exports.OllamaAdapter = OllamaAdapter;
exports.OpenAIAdapter = OpenAIAdapter;
exports.createAnthropic = createAnthropic;
exports.createConsoleSDKLogger = createConsoleSDKLogger;
exports.createModel = createModel;
exports.createOllama = createOllama;
exports.createOpenAI = createOpenAI;
exports.emitSDKLog = emitSDKLog;
exports.formatSDKLog = formatSDKLog;
exports.mergeMcpStdioEnv = mergeMcpStdioEnv;
exports.mergeProcessEnv = mergeProcessEnv;
exports.ollamaMessageContentToApiString = ollamaMessageContentToApiString;
exports.ollamaStreamChunksFromChatData = ollamaStreamChunksFromChatData;
// Fix: the sourceMappingURL directive was emitted twice; keep a single copy as
// the final line (tools honor the last occurrence).
//# sourceMappingURL=chunk-NYZD3THB.cjs.map