fourmis-agents-sdk 0.2.4 → 0.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,710 @@
1
// @bun
// --- Bun bundler runtime helpers (machine-generated preamble) ---
var __defProp = Object.defineProperty;
// Install lazy, enumerable getters on `target` for every key of `all`
// (ESM named-export shim). The setter swaps the backing thunk so a later
// reassignment of an export is still observable through the getter.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, {
      get: all[name],
      enumerable: true,
      configurable: true,
      set: (newValue) => all[name] = () => newValue
    });
};
// Run a module initializer exactly once and cache its result
// (`fn = 0` marks it as already executed).
var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
// CommonJS require bridge — Bun-specific (`import.meta.require` is
// undefined under plain Node ESM).
var __require = import.meta.require;
14
+
15
// src/auth/gemini-oauth.ts
// Namespace object for the gemini-oauth module; consumed via the deferred
// dynamic import in GeminiAdapter.refreshTokenIfNeeded below.
var exports_gemini_oauth = {};
__export(exports_gemini_oauth, {
  loadTokensSync: () => loadTokensSync,
  loadTokens: () => loadTokens,
  isLoggedIn: () => isLoggedIn,
  getValidToken: () => getValidToken
});
23
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
import { join } from "path";
import { homedir } from "os";
26
// Resolve the user's home directory. The HOME env var wins so tests and
// sandboxes can redirect credential lookups; os.homedir() is the fallback.
function getHome() {
  const fromEnv = process.env.HOME;
  if (fromEnv !== undefined && fromEnv !== null) {
    return fromEnv;
  }
  return homedir();
}
29
// Absolute path of the Gemini CLI's cached OAuth credential file
// (~/.gemini/oauth_creds.json).
function tokenPath() {
  const credentialsDir = join(getHome(), ".gemini");
  return join(credentialsDir, "oauth_creds.json");
}
32
// Read and validate the cached OAuth credential file.
// Returns the parsed token object only when both access_token and
// refresh_token are present; otherwise null (missing file, unreadable
// file, malformed JSON, or partial credentials all mean "not logged in").
function loadTokens() {
  const credsFile = tokenPath();
  try {
    const parsed = JSON.parse(readFileSync(credsFile, "utf-8"));
    if (parsed.access_token && parsed.refresh_token) {
      return parsed;
    }
  } catch {
    // Deliberate best-effort: any read/parse failure falls through to null.
  }
  return null;
}
43
// Alias kept for API symmetry with async consumers; token loading is
// already synchronous, so this simply delegates.
function loadTokensSync() {
  const tokens = loadTokens();
  return tokens;
}
46
// Persist OAuth tokens to ~/.gemini/oauth_creds.json with owner-only (0600)
// permissions, creating the directory if necessary.
// Fixes: the previous existsSync-then-mkdirSync pair was a TOCTOU race and
// pulled mkdirSync in through the Bun-only __require even though "fs" is
// imported at the top of the file; mkdirSync with recursive:true is already
// a no-op when the directory exists. The 384 literal is now the equivalent
// but self-explanatory 0o600.
function saveTokens(tokens) {
  const home = process.env.HOME ?? homedir();
  const dir = join(home, ".gemini");
  mkdirSync(dir, { recursive: true });
  const credsFile = join(dir, "oauth_creds.json");
  writeFileSync(credsFile, JSON.stringify(tokens, null, 2), { mode: 0o600 });
}
55
// Exchange a refresh token for a fresh access token at the standard Google
// OAuth2 token endpoint, using the Gemini CLI's client credentials.
// Resolves to the parsed token response; throws with the HTTP status and
// response body on any non-2xx reply.
async function refreshAccessToken(refreshToken) {
  const form = new URLSearchParams();
  form.set("grant_type", "refresh_token");
  form.set("client_id", GEMINI_CLIENT_ID);
  form.set("client_secret", GEMINI_CLIENT_SECRET);
  form.set("refresh_token", refreshToken);
  const response = await fetch(GOOGLE_TOKEN_URL, {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded" },
    body: form
  });
  if (!response.ok) {
    const text = await response.text();
    throw new Error(`Gemini token refresh failed (${response.status}): ${text}`);
  }
  return response.json();
}
72
// Return a usable access token, refreshing it first when it is missing an
// expiry or expires within the next five minutes. A successful refresh is
// persisted back to disk (both expires_at and expiry_date are written for
// compatibility with the Gemini CLI's own format). If the refresh fails we
// deliberately fall back to the possibly-stale stored token rather than
// failing the caller. Resolves to null when no credentials are cached.
async function getValidToken() {
  const stored = loadTokens();
  if (!stored) {
    return null;
  }
  const REFRESH_MARGIN_MS = 300000;
  const expiry = stored.expires_at ?? stored.expiry_date;
  const stale = !expiry || expiry <= Date.now() + REFRESH_MARGIN_MS;
  if (!stale) {
    return { accessToken: stored.access_token };
  }
  try {
    const fresh = await refreshAccessToken(stored.refresh_token);
    const expiresAtMs = Date.now() + fresh.expires_in * 1000;
    const merged = {
      access_token: fresh.access_token,
      // Google may omit refresh_token on refresh; keep the old one then.
      refresh_token: fresh.refresh_token ?? stored.refresh_token,
      token_type: fresh.token_type ?? "Bearer",
      expires_in: fresh.expires_in,
      expires_at: expiresAtMs,
      expiry_date: expiresAtMs
    };
    saveTokens(merged);
    return { accessToken: merged.access_token };
  } catch {
    // Best-effort: refresh failed, hand back the stored token anyway.
    return { accessToken: stored.access_token };
  }
}
98
// True when complete cached OAuth credentials exist on disk.
function isLoggedIn() {
  const tokens = loadTokens();
  return tokens !== null;
}
101
// OAuth client registration used by the official Gemini CLI (installed-app
// flow). NOTE(review): this "secret" ships inside public CLI builds, so it
// is not confidential in the usual sense — but confirm it stays in sync
// with the upstream Gemini CLI client registration.
const GEMINI_CLIENT_ID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com";
const GEMINI_CLIENT_SECRET = "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";
// Standard Google OAuth2 token endpoint.
const GOOGLE_TOKEN_URL = "https://oauth2.googleapis.com/token";
// No-op module initializer kept so the bundler's lazy-import shim
// (init_gemini_oauth(), exports_gemini_oauth) stays valid.
const init_gemini_oauth = () => {};
103
+
104
// src/utils/cost.ts
// Anthropic list prices in USD per million tokens, keyed by model id.
// cacheRead/cacheWrite cover prompt-cache reads and cache writes.
// NOTE(review): hard-coded snapshots — verify against the current Anthropic
// pricing page before relying on them for billing.
var ANTHROPIC_PRICING = {
  "claude-opus-4-6": {
    inputPerMillion: 15,
    outputPerMillion: 75,
    cacheReadPerMillion: 1.5,
    cacheWritePerMillion: 18.75
  },
  "claude-sonnet-4-5-20250929": {
    inputPerMillion: 3,
    outputPerMillion: 15,
    cacheReadPerMillion: 0.3,
    cacheWritePerMillion: 3.75
  },
  "claude-haiku-4-5-20251001": {
    inputPerMillion: 0.8,
    outputPerMillion: 4,
    cacheReadPerMillion: 0.08,
    cacheWritePerMillion: 1
  },
  "claude-3-5-sonnet-20241022": {
    inputPerMillion: 3,
    outputPerMillion: 15,
    cacheReadPerMillion: 0.3,
    cacheWritePerMillion: 3.75
  },
  "claude-3-5-haiku-20241022": {
    inputPerMillion: 0.8,
    outputPerMillion: 4,
    cacheReadPerMillion: 0.08,
    cacheWritePerMillion: 1
  }
};
// Context window size in tokens per model.
var ANTHROPIC_CONTEXT_WINDOWS = {
  "claude-opus-4-6": 200000,
  "claude-sonnet-4-5-20250929": 200000,
  "claude-haiku-4-5-20251001": 200000,
  "claude-3-5-sonnet-20241022": 200000,
  "claude-3-5-haiku-20241022": 200000
};
// Maximum output tokens per model.
var ANTHROPIC_MAX_OUTPUT = {
  "claude-opus-4-6": 32000,
  "claude-sonnet-4-5-20250929": 16384,
  "claude-haiku-4-5-20251001": 8192,
  "claude-3-5-sonnet-20241022": 8192,
  "claude-3-5-haiku-20241022": 8192
};
151
// USD cost of one Anthropic API call for the given model and token usage.
// Returns 0 for unknown models. Each usage counter is guarded with ?? 0 so
// a payload that omits the cache fields (common when prompt caching is not
// in play) yields a numeric cost instead of NaN poisoning running totals.
// Missing cache prices fall back to the plain input rate.
function calculateAnthropicCost(model, usage) {
  const pricing = findPricing(model);
  if (!pricing)
    return 0;
  const inputCost = (usage.inputTokens ?? 0) / 1e6 * pricing.inputPerMillion;
  const outputCost = (usage.outputTokens ?? 0) / 1e6 * pricing.outputPerMillion;
  const cacheReadCost = (usage.cacheReadInputTokens ?? 0) / 1e6 * (pricing.cacheReadPerMillion ?? pricing.inputPerMillion);
  const cacheWriteCost = (usage.cacheCreationInputTokens ?? 0) / 1e6 * (pricing.cacheWritePerMillion ?? pricing.inputPerMillion);
  return inputCost + outputCost + cacheReadCost + cacheWriteCost;
}
161
// Locate the pricing entry for an Anthropic model id. An exact key match
// wins; otherwise a fuzzy pass matches either direction of prefixing, with
// the table key truncated at its "-2025" date suffix (so e.g. a dated model
// id still matches its undated table key). Returns undefined when nothing
// matches.
function findPricing(model) {
  const exact = ANTHROPIC_PRICING[model];
  if (exact)
    return exact;
  for (const key of Object.keys(ANTHROPIC_PRICING)) {
    const undated = key.split("-2025")[0];
    if (key.startsWith(model) || model.startsWith(undated)) {
      return ANTHROPIC_PRICING[key];
    }
  }
  return undefined;
}
171
// OpenAI list prices in USD per million tokens, keyed by model id.
// cacheReadPerMillion applies to cached prompt tokens.
// NOTE(review): hard-coded snapshots — verify against the current OpenAI
// pricing page before relying on them for billing.
var OPENAI_PRICING = {
  "gpt-4.1": {
    inputPerMillion: 2,
    outputPerMillion: 8,
    cacheReadPerMillion: 0.5
  },
  "gpt-4.1-mini": {
    inputPerMillion: 0.4,
    outputPerMillion: 1.6,
    cacheReadPerMillion: 0.1
  },
  "gpt-4.1-nano": {
    inputPerMillion: 0.1,
    outputPerMillion: 0.4,
    cacheReadPerMillion: 0.025
  },
  "gpt-4o": {
    inputPerMillion: 2.5,
    outputPerMillion: 10,
    cacheReadPerMillion: 1.25
  },
  "gpt-4o-mini": {
    inputPerMillion: 0.15,
    outputPerMillion: 0.6,
    cacheReadPerMillion: 0.075
  },
  o3: {
    inputPerMillion: 2,
    outputPerMillion: 8,
    cacheReadPerMillion: 0.5
  },
  "o3-mini": {
    inputPerMillion: 1.1,
    outputPerMillion: 4.4,
    cacheReadPerMillion: 0.275
  },
  "o4-mini": {
    inputPerMillion: 1.1,
    outputPerMillion: 4.4,
    cacheReadPerMillion: 0.275
  }
};
// Context window size in tokens per model.
var OPENAI_CONTEXT_WINDOWS = {
  "gpt-4.1": 1047576,
  "gpt-4.1-mini": 1047576,
  "gpt-4.1-nano": 1047576,
  "gpt-4o": 128000,
  "gpt-4o-mini": 128000,
  o3: 200000,
  "o3-mini": 200000,
  "o4-mini": 200000
};
// Maximum output tokens per model.
var OPENAI_MAX_OUTPUT = {
  "gpt-4.1": 32768,
  "gpt-4.1-mini": 32768,
  "gpt-4.1-nano": 32768,
  "gpt-4o": 16384,
  "gpt-4o-mini": 16384,
  o3: 1e5,
  "o3-mini": 1e5,
  "o4-mini": 1e5
};
233
// USD cost of one OpenAI API call for the given model and token usage.
// Returns 0 for unknown models. Usage counters are guarded with ?? 0 so a
// payload that omits cacheReadInputTokens (no cached prompt) produces a
// numeric cost instead of NaN. A missing cache-read price falls back to the
// plain input rate. OpenAI has no cache-write charge, so none is added.
function calculateOpenAICost(model, usage) {
  const pricing = findOpenAIPricing(model);
  if (!pricing)
    return 0;
  const inputCost = (usage.inputTokens ?? 0) / 1e6 * pricing.inputPerMillion;
  const outputCost = (usage.outputTokens ?? 0) / 1e6 * pricing.outputPerMillion;
  const cacheReadCost = (usage.cacheReadInputTokens ?? 0) / 1e6 * (pricing.cacheReadPerMillion ?? pricing.inputPerMillion);
  return inputCost + outputCost + cacheReadCost;
}
242
// Locate the pricing entry for an OpenAI model id: exact key first, then
// the longest table key that is a prefix of the model id (so versioned ids
// like "gpt-4o-2024-08-06" resolve to "gpt-4o", and "gpt-4.1-mini" is not
// shadowed by "gpt-4.1"). Returns undefined when nothing matches.
function findOpenAIPricing(model) {
  if (OPENAI_PRICING[model])
    return OPENAI_PRICING[model];
  let best;
  let bestLength = 0;
  for (const key of Object.keys(OPENAI_PRICING)) {
    if (model.startsWith(key) && key.length > bestLength) {
      bestLength = key.length;
      best = OPENAI_PRICING[key];
    }
  }
  return best;
}
255
// Gemini list prices in USD per million tokens, keyed by model id.
// cacheReadPerMillion applies to cached (context-cache) prompt tokens;
// entries without it fall back to the input rate in calculateGeminiCost.
// NOTE(review): hard-coded snapshots — verify against the current Google
// AI pricing page before relying on them for billing.
var GEMINI_PRICING = {
  "gemini-2.5-pro": {
    inputPerMillion: 1.25,
    outputPerMillion: 10,
    cacheReadPerMillion: 0.315
  },
  "gemini-2.5-flash": {
    inputPerMillion: 0.15,
    outputPerMillion: 0.6,
    cacheReadPerMillion: 0.0375
  },
  "gemini-2.5-flash-lite": {
    inputPerMillion: 0.075,
    outputPerMillion: 0.3
  },
  "gemini-2.0-flash": {
    inputPerMillion: 0.1,
    outputPerMillion: 0.4,
    cacheReadPerMillion: 0.025
  },
  "gemini-2.0-flash-lite": {
    inputPerMillion: 0.075,
    outputPerMillion: 0.3
  }
};
// Context window size in tokens per model (1 MiB-style 1048576 = 1M).
var GEMINI_CONTEXT_WINDOWS = {
  "gemini-2.5-pro": 1048576,
  "gemini-2.5-flash": 1048576,
  "gemini-2.5-flash-lite": 1048576,
  "gemini-2.0-flash": 1048576,
  "gemini-2.0-flash-lite": 1048576
};
// Maximum output tokens per model.
var GEMINI_MAX_OUTPUT = {
  "gemini-2.5-pro": 65536,
  "gemini-2.5-flash": 65536,
  "gemini-2.5-flash-lite": 65536,
  "gemini-2.0-flash": 8192,
  "gemini-2.0-flash-lite": 8192
};
294
// USD cost of one Gemini API call for the given model and token usage.
// Returns 0 for unknown models. Usage counters are guarded with ?? 0 so a
// payload that omits cacheReadInputTokens produces a numeric cost instead
// of NaN. A missing cache-read price falls back to the plain input rate.
function calculateGeminiCost(model, usage) {
  const pricing = findGeminiPricing(model);
  if (!pricing)
    return 0;
  const inputCost = (usage.inputTokens ?? 0) / 1e6 * pricing.inputPerMillion;
  const outputCost = (usage.outputTokens ?? 0) / 1e6 * pricing.outputPerMillion;
  const cacheReadCost = (usage.cacheReadInputTokens ?? 0) / 1e6 * (pricing.cacheReadPerMillion ?? pricing.inputPerMillion);
  return inputCost + outputCost + cacheReadCost;
}
303
// Locate the pricing entry for a Gemini model id: exact key first, then the
// longest table key that prefixes the model id (distinct prefix keys cannot
// tie on length, so "longest" is unambiguous). Returns undefined when
// nothing matches.
function findGeminiPricing(model) {
  if (GEMINI_PRICING[model])
    return GEMINI_PRICING[model];
  const bestKey = Object.keys(GEMINI_PRICING)
    .filter((key) => model.startsWith(key))
    .sort((a, b) => b.length - a.length)[0];
  return bestKey ? GEMINI_PRICING[bestKey] : undefined;
}
316
+
317
+ // src/providers/gemini.ts
318
+ import { GoogleGenAI } from "@google/genai";
319
// Cloud Code Assist backend used for OAuth (Gemini CLI free-tier) traffic.
const CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com";
// Internal API version path segment used by the Gemini CLI.
const CODE_ASSIST_API_VERSION = "v1internal";
321
+
322
// Provider adapter for Google Gemini with two transports:
//  - SDK mode: @google/genai with an API key (options.apiKey or GEMINI_API_KEY).
//  - OAuth mode: raw fetch against the Cloud Code Assist SSE endpoint using
//    cached Gemini CLI credentials (~/.gemini/oauth_creds.json).
// Both transports emit the same event stream:
// text_delta / thinking_delta / tool_call / usage / done.
class GeminiAdapter {
  name = "gemini";
  // SDK client; stays null in OAuth mode.
  client = null;
  // True when authenticating with cached Gemini CLI OAuth tokens.
  oauthMode;
  // Bearer token for Code Assist requests (OAuth mode only).
  currentAccessToken;
  // Cloud project backing Code Assist; resolved lazily in ensureProjectId().
  projectId;
  // Prefer an explicit/env API key; otherwise fall back to cached OAuth
  // tokens; otherwise build an SDK client with an empty key so construction
  // succeeds and the failure surfaces at request time instead.
  constructor(options) {
    const key = options?.apiKey ?? process.env.GEMINI_API_KEY;
    if (key) {
      this.oauthMode = false;
      this.client = new GoogleGenAI({ apiKey: key });
    } else {
      const tokens = loadTokensSync2();
      if (tokens) {
        this.oauthMode = true;
        this.currentAccessToken = tokens.access_token;
      } else {
        this.oauthMode = false;
        this.client = new GoogleGenAI({ apiKey: "" });
      }
    }
  }
  // Route a chat request to the transport matching the auth mode.
  async* chat(request) {
    if (this.oauthMode) {
      yield* this.chatCodeAssist(request);
    } else {
      yield* this.chatSdk(request);
    }
  }
  // USD cost for a completed call (pricing tables in src/utils/cost.ts).
  calculateCost(model, usage) {
    return calculateGeminiCost(model, usage);
  }
  // Context window in tokens; unknown models assume the 1M default.
  getContextWindow(model) {
    return GEMINI_CONTEXT_WINDOWS[model] ?? 1048576;
  }
  // Capability probe used by callers to gate optional request features.
  supportsFeature(feature) {
    switch (feature) {
      case "streaming":
      case "tool_calling":
      case "image_input":
      case "structured_output":
      case "thinking":
      case "pdf_input":
        return true;
      default:
        return false;
    }
  }
  // SDK-mode streaming chat via @google/genai generateContentStream.
  async* chatSdk(request) {
    const contents = this.convertMessages(request.messages);
    const tools = request.tools ? this.convertTools(request.tools) : undefined;
    const maxTokens = request.maxTokens ?? GEMINI_MAX_OUTPUT[request.model] ?? 65536;
    const config = {
      maxOutputTokens: maxTokens,
      abortSignal: request.signal ?? undefined
    };
    if (request.systemPrompt) {
      config.systemInstruction = request.systemPrompt;
    }
    if (request.temperature !== undefined) {
      config.temperature = request.temperature;
    }
    if (tools) {
      config.tools = [{ functionDeclarations: tools }];
    }
    const stream = await this.client.models.generateContentStream({
      model: request.model,
      contents,
      config
    });
    let hasToolCalls = false;
    for await (const chunk of stream) {
      // Usage metadata can arrive on any chunk; cached prompt tokens are
      // billed at the cache-read rate, so subtract them from the raw count.
      if (chunk.usageMetadata) {
        const u = chunk.usageMetadata;
        const cached = u.cachedContentTokenCount ?? 0;
        yield {
          type: "usage",
          usage: {
            inputTokens: (u.promptTokenCount ?? 0) - cached,
            outputTokens: u.candidatesTokenCount ?? 0,
            cacheReadInputTokens: cached,
            cacheCreationInputTokens: 0
          }
        };
      }
      // Only the first candidate is consumed.
      const candidate = chunk.candidates?.[0];
      if (!candidate?.content?.parts)
        continue;
      for (const part of candidate.content.parts) {
        if (part.text) {
          // `thought` flags model reasoning text vs. answer text.
          if (part.thought) {
            yield { type: "thinking_delta", text: part.text };
          } else {
            yield { type: "text_delta", text: part.text };
          }
        }
        if (part.functionCall) {
          hasToolCalls = true;
          yield {
            type: "tool_call",
            // Gemini may omit call ids; synthesize one for correlation.
            id: part.functionCall.id ?? crypto.randomUUID(),
            name: part.functionCall.name ?? "",
            input: part.functionCall.args ?? {}
          };
        }
      }
      if (candidate.finishReason) {
        yield {
          type: "done",
          stopReason: this.mapFinishReason(candidate.finishReason, hasToolCalls)
        };
        return;
      }
    }
    // Stream ended without an explicit finish reason.
    yield { type: "done", stopReason: hasToolCalls ? "tool_use" : "end_turn" };
  }
  // OAuth-mode streaming chat against the Code Assist REST API (SSE).
  async* chatCodeAssist(request) {
    await this.refreshTokenIfNeeded();
    await this.ensureProjectId();
    const contents = this.convertMessages(request.messages);
    const tools = request.tools ? this.convertTools(request.tools) : undefined;
    const maxTokens = request.maxTokens ?? GEMINI_MAX_OUTPUT[request.model] ?? 65536;
    const generationConfig = {
      maxOutputTokens: maxTokens
    };
    if (request.temperature !== undefined) {
      generationConfig.temperature = request.temperature;
    }
    // Code Assist wraps the normal GenerateContent payload in
    // { model, project, request }.
    const innerRequest = {
      contents,
      generationConfig
    };
    if (request.systemPrompt) {
      innerRequest.systemInstruction = {
        parts: [{ text: request.systemPrompt }]
      };
    }
    if (tools) {
      innerRequest.tools = [{ functionDeclarations: tools }];
    }
    const body = {
      model: request.model,
      project: this.projectId,
      request: innerRequest
    };
    const url = `${CODE_ASSIST_ENDPOINT}/${CODE_ASSIST_API_VERSION}:streamGenerateContent?alt=sse`;
    const res = await fetch(url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.currentAccessToken}`
      },
      body: JSON.stringify(body),
      signal: request.signal ?? undefined
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`Gemini Code Assist API error (${res.status}): ${text}`);
    }
    yield* this.parseSSEStream(res);
  }
  // Incrementally parse the Code Assist SSE body into stream events.
  // `data:` lines accumulate until a blank line terminates each SSE event;
  // a trailing partial event is flushed after the reader drains.
  async* parseSSEStream(res) {
    const reader = res.body.getReader();
    const decoder = new TextDecoder;
    let buffer = "";
    let hasToolCalls = false;
    let dataLines = [];
    while (true) {
      const { done, value } = await reader.read();
      if (done)
        break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split(`
`);
      // Keep the (possibly incomplete) final line for the next chunk.
      buffer = lines.pop() || "";
      for (const line of lines) {
        if (line.startsWith("data: ")) {
          dataLines.push(line.slice(6));
        } else if (line.trim() === "" && dataLines.length > 0) {
          // Blank line = end of one SSE event; join multi-line payloads.
          const json = dataLines.join(`
`);
          dataLines = [];
          let obj;
          try {
            obj = JSON.parse(json);
          } catch {
            // Skip malformed events rather than aborting the stream.
            continue;
          }
          // Code Assist nests the payload under `response`.
          const response = obj.response ?? obj;
          const candidate = response?.candidates?.[0];
          if (candidate?.content?.parts) {
            for (const part of candidate.content.parts) {
              if (part.text) {
                if (part.thought) {
                  yield { type: "thinking_delta", text: part.text };
                } else {
                  yield { type: "text_delta", text: part.text };
                }
              }
              if (part.functionCall) {
                hasToolCalls = true;
                yield {
                  type: "tool_call",
                  id: part.functionCall.id ?? crypto.randomUUID(),
                  name: part.functionCall.name ?? "",
                  input: part.functionCall.args ?? {}
                };
              }
            }
          }
          if (response?.usageMetadata) {
            const u = response.usageMetadata;
            const cached = u.cachedContentTokenCount ?? 0;
            yield {
              type: "usage",
              usage: {
                inputTokens: (u.promptTokenCount ?? 0) - cached,
                outputTokens: u.candidatesTokenCount ?? 0,
                cacheReadInputTokens: cached,
                cacheCreationInputTokens: 0
              }
            };
          }
          if (candidate?.finishReason) {
            yield {
              type: "done",
              stopReason: this.mapFinishReason(candidate.finishReason, hasToolCalls)
            };
            return;
          }
        }
      }
    }
    // Flush a final event that was not followed by a blank line.
    if (dataLines.length > 0) {
      const json = dataLines.join(`
`);
      try {
        const obj = JSON.parse(json);
        const response = obj.response ?? obj;
        const candidate = response?.candidates?.[0];
        if (candidate?.finishReason) {
          yield {
            type: "done",
            stopReason: this.mapFinishReason(candidate.finishReason, hasToolCalls)
          };
          return;
        }
      } catch {}
    }
    yield { type: "done", stopReason: hasToolCalls ? "tool_use" : "end_turn" };
  }
  // Refresh the OAuth access token (via the bundled gemini-oauth module)
  // when near expiry. Errors are swallowed deliberately: a failed refresh
  // degrades to reusing the current, possibly-stale token.
  async refreshTokenIfNeeded() {
    if (!this.oauthMode)
      return;
    try {
      const { getValidToken: getValidToken2 } = await Promise.resolve().then(() => (init_gemini_oauth(), exports_gemini_oauth));
      const result = await getValidToken2();
      if (result && result.accessToken !== this.currentAccessToken) {
        this.currentAccessToken = result.accessToken;
      }
    } catch {}
  }
  // Resolve the Cloud project for Code Assist: env override first, then the
  // loadCodeAssist endpoint's managed project. Failures leave projectId
  // undefined, so the chat request will surface the server-side error.
  async ensureProjectId() {
    if (this.projectId)
      return;
    this.projectId = process.env.GOOGLE_CLOUD_PROJECT ?? process.env.GOOGLE_CLOUD_PROJECT_ID ?? undefined;
    if (this.projectId)
      return;
    try {
      const res = await fetch(`${CODE_ASSIST_ENDPOINT}/${CODE_ASSIST_API_VERSION}:loadCodeAssist`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.currentAccessToken}`
        },
        body: JSON.stringify({
          metadata: {
            ideType: "IDE_UNSPECIFIED",
            platform: "PLATFORM_UNSPECIFIED",
            pluginType: "GEMINI"
          }
        })
      });
      if (res.ok) {
        const data = await res.json();
        this.projectId = data.cloudaicompanionProject;
      }
    } catch {}
  }
  // Convert provider-neutral messages (Anthropic-style content blocks) into
  // Gemini `contents`: assistant → role "model"; tool_result blocks become
  // functionResponse parts on a "user" turn, emitted before any user text.
  // NOTE(review): only text / tool_use / tool_result blocks are handled;
  // any other block type (e.g. images) is silently dropped — confirm intended.
  convertMessages(messages) {
    const result = [];
    for (const msg of messages) {
      if (typeof msg.content === "string") {
        result.push({
          role: msg.role === "assistant" ? "model" : "user",
          parts: [{ text: msg.content }]
        });
        continue;
      }
      if (msg.role === "assistant") {
        const parts = [];
        for (const block of msg.content) {
          if (block.type === "text") {
            parts.push({ text: block.text });
          } else if (block.type === "tool_use") {
            parts.push({
              functionCall: {
                name: block.name,
                args: block.input
              }
            });
          }
        }
        if (parts.length > 0) {
          result.push({ role: "model", parts });
        }
      } else {
        const textParts = [];
        const functionResponseParts = [];
        for (const block of msg.content) {
          if (block.type === "text") {
            textParts.push({ text: block.text });
          } else if (block.type === "tool_result") {
            functionResponseParts.push({
              functionResponse: {
                // Gemini requires the original function name on responses;
                // recover it from the earlier assistant tool_use block.
                name: findToolName(messages, block.tool_use_id) ?? "unknown",
                response: { result: block.content }
              }
            });
          }
        }
        if (functionResponseParts.length > 0) {
          result.push({ role: "user", parts: functionResponseParts });
        }
        if (textParts.length > 0) {
          result.push({ role: "user", parts: textParts });
        }
      }
    }
    return result;
  }
  // Map neutral tool definitions to Gemini functionDeclarations.
  // NOTE(review): inputSchema is passed through untranslated — assumes it is
  // already a schema shape Gemini accepts; confirm for exotic JSON Schemas.
  convertTools(tools) {
    return tools.map((tool) => ({
      name: tool.name,
      description: tool.description,
      parameters: tool.inputSchema
    }));
  }
  // Normalize Gemini finish reasons. Any tool call forces "tool_use";
  // unrecognized reasons (SAFETY, RECITATION, ...) fall back to "end_turn".
  mapFinishReason(reason, hasToolCalls) {
    if (hasToolCalls)
      return "tool_use";
    switch (reason) {
      case "STOP":
        return "end_turn";
      case "MAX_TOKENS":
        return "max_tokens";
      default:
        return "end_turn";
    }
  }
}
683
// Recover the tool name recorded on the assistant tool_use block whose id
// matches `toolUseId`, scanning messages in order. Returns undefined when
// no assistant block carries that id.
function findToolName(messages, toolUseId) {
  for (const message of messages) {
    if (message.role !== "assistant" || typeof message.content === "string")
      continue;
    const hit = message.content.find((block) => block.type === "tool_use" && block.id === toolUseId);
    if (hit) {
      return hit.name;
    }
  }
  return undefined;
}
695
// Synchronously load cached Gemini CLI OAuth credentials from
// ~/.gemini/oauth_creds.json. Returns the parsed object when both
// access_token and refresh_token are present, otherwise null.
// Fixes: builds the path with join() instead of string concatenation and
// uses the fs/path/os bindings already imported at the top of this file,
// removing the redundant Bun-only __require("fs")/__require("os") calls
// (which break under plain Node, where import.meta.require is undefined).
function loadTokensSync2() {
  const credsPath = join(process.env.HOME ?? homedir(), ".gemini", "oauth_creds.json");
  try {
    const data = JSON.parse(readFileSync(credsPath, "utf-8"));
    if (data.access_token && data.refresh_token) {
      return data;
    }
  } catch {
    // Missing or unreadable credentials simply mean "not logged in".
  }
  return null;
}
708
+ export {
709
+ GeminiAdapter
710
+ };