@yourgpt/llm-sdk 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +145 -450
  11. package/dist/index.d.ts +145 -450
  12. package/dist/index.js +1837 -307
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1827 -305
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
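The new dist/providers/* bundles, together with the package.json changes (+34 -3), suggest that each provider now ships as its own entry point. As a rough sketch of how the Google provider shown in the diff below might be consumed — the exact import specifier depends on the package.json exports map, which is not shown here, so treat the subpath as an assumption:

  // Hypothetical subpath import; confirm against package.json "exports".
  import { createGoogle } from "@yourgpt/llm-sdk/providers/google";

  const provider = createGoogle({ apiKey: process.env.GOOGLE_API_KEY });
  const caps = provider.getCapabilities("gemini-2.0-flash"); // vision, tools, streaming, PDF, ...
  const model = provider.languageModel("gemini-2.0-flash");  // returns the GoogleAdapter shown below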
package/dist/providers/google/index.mjs
@@ -0,0 +1,785 @@
+ import { generateMessageId, generateToolCallId } from '@yourgpt/copilot-sdk/core';
+
+ // src/providers/google/provider.ts
+ var GOOGLE_MODELS = {
+   // Gemini 2.0
+   "gemini-2.0-flash": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 1048576
+   },
+   "gemini-2.0-flash-exp": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 1048576
+   },
+   "gemini-2.0-flash-thinking-exp": {
+     vision: true,
+     tools: false,
+     audio: false,
+     video: false,
+     maxTokens: 32767
+   },
+   // Gemini 1.5
+   "gemini-1.5-pro": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 2097152
+   },
+   "gemini-1.5-pro-latest": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 2097152
+   },
+   "gemini-1.5-flash": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 1048576
+   },
+   "gemini-1.5-flash-latest": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     maxTokens: 1048576
+   },
+   "gemini-1.5-flash-8b": {
+     vision: true,
+     tools: true,
+     audio: false,
+     video: false,
+     maxTokens: 1048576
+   }
+ };
+ function google(modelId, options = {}) {
+   const apiKey = options.apiKey ?? process.env.GOOGLE_API_KEY ?? process.env.GEMINI_API_KEY;
+   let client = null;
+   async function getClient() {
+     if (!client) {
+       const { GoogleGenerativeAI } = await import('@google/generative-ai');
+       client = new GoogleGenerativeAI(apiKey);
+     }
+     return client;
+   }
+   const modelConfig = GOOGLE_MODELS[modelId] ?? GOOGLE_MODELS["gemini-2.0-flash"];
+   return {
+     provider: "google",
+     modelId,
+     capabilities: {
+       supportsVision: modelConfig.vision,
+       supportsTools: modelConfig.tools,
+       supportsStreaming: true,
+       supportsJsonMode: true,
+       supportsThinking: modelId.includes("thinking"),
+       supportsPDF: true,
+       maxTokens: modelConfig.maxTokens,
+       supportedImageTypes: modelConfig.vision ? ["image/png", "image/jpeg", "image/gif", "image/webp"] : []
+     },
+     async doGenerate(params) {
+       const client2 = await getClient();
+       const model = client2.getGenerativeModel({
+         model: modelId,
+         safetySettings: options.safetySettings
+       });
+       const { systemInstruction, contents } = formatMessagesForGemini(
+         params.messages
+       );
+       const chat = model.startChat({
+         history: contents.slice(0, -1),
+         systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
+         tools: params.tools ? [{ functionDeclarations: formatToolsForGemini(params.tools) }] : void 0,
+         generationConfig: {
+           temperature: params.temperature,
+           maxOutputTokens: params.maxTokens
+         }
+       });
+       const lastMessage = contents[contents.length - 1];
+       const result = await chat.sendMessage(lastMessage.parts);
+       const response = result.response;
+       let text = "";
+       const toolCalls = [];
+       let toolCallIndex = 0;
+       const candidate = response.candidates?.[0];
+       if (candidate?.content?.parts) {
+         for (const part of candidate.content.parts) {
+           if ("text" in part && part.text) {
+             text += part.text;
+           }
+           if ("functionCall" in part && part.functionCall) {
+             toolCalls.push({
+               id: `call_${toolCallIndex++}`,
+               name: part.functionCall.name,
+               args: part.functionCall.args || {}
+             });
+           }
+         }
+       }
+       return {
+         text,
+         toolCalls,
+         finishReason: mapFinishReason(candidate?.finishReason),
+         usage: {
+           promptTokens: response.usageMetadata?.promptTokenCount ?? 0,
+           completionTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
+           totalTokens: response.usageMetadata?.totalTokenCount ?? 0
+         },
+         rawResponse: response
+       };
+     },
+     async *doStream(params) {
+       const client2 = await getClient();
+       const model = client2.getGenerativeModel({
+         model: modelId,
+         safetySettings: options.safetySettings
+       });
+       const { systemInstruction, contents } = formatMessagesForGemini(
+         params.messages
+       );
+       const chat = model.startChat({
+         history: contents.slice(0, -1),
+         systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
+         tools: params.tools ? [{ functionDeclarations: formatToolsForGemini(params.tools) }] : void 0,
+         generationConfig: {
+           temperature: params.temperature,
+           maxOutputTokens: params.maxTokens
+         }
+       });
+       const lastMessage = contents[contents.length - 1];
+       const result = await chat.sendMessageStream(lastMessage.parts);
+       let toolCallIndex = 0;
+       let promptTokens = 0;
+       let completionTokens = 0;
+       try {
+         for await (const chunk of result.stream) {
+           if (params.signal?.aborted) {
+             yield { type: "error", error: new Error("Aborted") };
+             return;
+           }
+           const candidate = chunk.candidates?.[0];
+           if (!candidate?.content?.parts) continue;
+           for (const part of candidate.content.parts) {
+             if ("text" in part && part.text) {
+               yield { type: "text-delta", text: part.text };
+             }
+             if ("functionCall" in part && part.functionCall) {
+               yield {
+                 type: "tool-call",
+                 toolCall: {
+                   id: `call_${toolCallIndex++}`,
+                   name: part.functionCall.name,
+                   args: part.functionCall.args || {}
+                 }
+               };
+             }
+           }
+           if (chunk.usageMetadata) {
+             promptTokens = chunk.usageMetadata.promptTokenCount ?? 0;
+             completionTokens = chunk.usageMetadata.candidatesTokenCount ?? 0;
+           }
+           if (candidate.finishReason) {
+             yield {
+               type: "finish",
+               finishReason: mapFinishReason(candidate.finishReason),
+               usage: {
+                 promptTokens,
+                 completionTokens,
+                 totalTokens: promptTokens + completionTokens
+               }
+             };
+           }
+         }
+       } catch (error) {
+         yield {
+           type: "error",
+           error: error instanceof Error ? error : new Error(String(error))
+         };
+       }
+     }
+   };
+ }
+ function mapFinishReason(reason) {
+   switch (reason) {
+     case "STOP":
+       return "stop";
+     case "MAX_TOKENS":
+       return "length";
+     case "SAFETY":
+       return "content-filter";
+     default:
+       return "unknown";
+   }
+ }
+ function formatMessagesForGemini(messages) {
+   let systemInstruction = "";
+   const contents = [];
+   for (const msg of messages) {
+     if (msg.role === "system") {
+       systemInstruction += (systemInstruction ? "\n" : "") + msg.content;
+       continue;
+     }
+     const parts = [];
+     if (msg.role === "user") {
+       if (typeof msg.content === "string") {
+         parts.push({ text: msg.content });
+       } else {
+         for (const part of msg.content) {
+           if (part.type === "text") {
+             parts.push({ text: part.text });
+           } else if (part.type === "image") {
+             const imageData = typeof part.image === "string" ? part.image : Buffer.from(part.image).toString("base64");
+             const base64 = imageData.startsWith("data:") ? imageData.split(",")[1] : imageData;
+             parts.push({
+               inlineData: {
+                 mimeType: part.mimeType ?? "image/png",
+                 data: base64
+               }
+             });
+           }
+         }
+       }
+       contents.push({ role: "user", parts });
+     } else if (msg.role === "assistant") {
+       if (msg.content) {
+         parts.push({ text: msg.content });
+       }
+       if (msg.toolCalls?.length) {
+         for (const tc of msg.toolCalls) {
+           parts.push({
+             functionCall: {
+               name: tc.name,
+               args: tc.args
+             }
+           });
+         }
+       }
+       if (parts.length > 0) {
+         contents.push({ role: "model", parts });
+       }
+     } else if (msg.role === "tool") {
+       contents.push({
+         role: "user",
+         parts: [
+           {
+             functionResponse: {
+               name: "tool",
+               // Gemini doesn't track by ID
+               response: JSON.parse(msg.content || "{}")
+             }
+           }
+         ]
+       });
+     }
+   }
+   if (contents.length === 0 || contents[0].role !== "user") {
+     contents.unshift({ role: "user", parts: [{ text: "" }] });
+   }
+   const merged = [];
+   for (const content of contents) {
+     const last = merged[merged.length - 1];
+     if (last && last.role === content.role) {
+       last.parts.push(...content.parts);
+     } else {
+       merged.push({ ...content, parts: [...content.parts] });
+     }
+   }
+   return { systemInstruction, contents: merged };
+ }
+ function formatToolsForGemini(tools) {
+   return tools.map((t) => ({
+     name: t.function.name,
+     description: t.function.description,
+     parameters: t.function.parameters
+   }));
+ }
+ function attachmentToGeminiPart(attachment) {
+   if (!attachment.data) {
+     console.warn(
+       "Gemini adapter: URL-based attachments not supported, skipping"
+     );
+     return null;
+   }
+   if (attachment.type === "image") {
+     let base64Data = attachment.data;
+     if (base64Data.startsWith("data:")) {
+       const commaIndex = base64Data.indexOf(",");
+       if (commaIndex !== -1) {
+         base64Data = base64Data.slice(commaIndex + 1);
+       }
+     }
+     return {
+       inlineData: {
+         mimeType: attachment.mimeType || "image/png",
+         data: base64Data
+       }
+     };
+   }
+   if (attachment.type === "audio" || attachment.type === "video") {
+     let base64Data = attachment.data;
+     if (base64Data.startsWith("data:")) {
+       const commaIndex = base64Data.indexOf(",");
+       if (commaIndex !== -1) {
+         base64Data = base64Data.slice(commaIndex + 1);
+       }
+     }
+     return {
+       inlineData: {
+         mimeType: attachment.mimeType || (attachment.type === "audio" ? "audio/mp3" : "video/mp4"),
+         data: base64Data
+       }
+     };
+   }
+   return null;
+ }
+ function messageToGeminiContent(msg) {
+   if (msg.role === "system") return null;
+   const parts = [];
+   if (msg.role === "tool" && msg.tool_call_id) {
+     let responseData;
+     try {
+       responseData = JSON.parse(msg.content || "{}");
+     } catch {
+       responseData = { result: msg.content || "" };
+     }
+     const toolName = msg.metadata?.toolName || "tool";
+     parts.push({
+       functionResponse: {
+         name: toolName,
+         response: responseData
+       }
+     });
+     return { role: "user", parts };
+   }
+   if (msg.content) {
+     parts.push({ text: msg.content });
+   }
+   const attachments = msg.metadata?.attachments;
+   if (attachments && Array.isArray(attachments)) {
+     for (const attachment of attachments) {
+       const part = attachmentToGeminiPart(attachment);
+       if (part) {
+         parts.push(part);
+       }
+     }
+   }
+   if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
+     for (const tc of msg.tool_calls) {
+       let args = {};
+       try {
+         args = JSON.parse(tc.function.arguments);
+       } catch {
+       }
+       parts.push({
+         functionCall: {
+           name: tc.function.name,
+           args
+         }
+       });
+     }
+   }
+   if (parts.length === 0) return null;
+   return {
+     role: msg.role === "assistant" ? "model" : "user",
+     parts
+   };
+ }
+ function formatToolsForGemini2(actions) {
+   if (!actions || actions.length === 0) return void 0;
+   return {
+     functionDeclarations: actions.map((action) => ({
+       name: action.name,
+       description: action.description,
+       parameters: action.parameters ? {
+         type: "object",
+         properties: Object.fromEntries(
+           Object.entries(action.parameters).map(([key, param]) => [
+             key,
+             {
+               type: param.type,
+               description: param.description,
+               enum: param.enum
+             }
+           ])
+         ),
+         required: Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key)
+       } : void 0
+     }))
+   };
+ }
+ var GoogleAdapter = class {
+   constructor(config) {
+     this.provider = "google";
+     this.config = config;
+     this.model = config.model || "gemini-2.0-flash";
+   }
+   async getClient() {
+     if (!this.client) {
+       const { GoogleGenerativeAI } = await import('@google/generative-ai');
+       this.client = new GoogleGenerativeAI(this.config.apiKey);
+     }
+     return this.client;
+   }
+   async *stream(request) {
+     const client = await this.getClient();
+     const modelId = request.config?.model || this.model;
+     const model = client.getGenerativeModel({
+       model: modelId,
+       safetySettings: this.config.safetySettings
+     });
+     let contents = [];
+     let systemInstruction;
+     if (request.rawMessages && request.rawMessages.length > 0) {
+       for (const msg of request.rawMessages) {
+         if (msg.role === "system") {
+           systemInstruction = (systemInstruction || "") + (msg.content || "");
+           continue;
+         }
+         const content = messageToGeminiContent(msg);
+         if (content) {
+           contents.push(content);
+         }
+       }
+       if (request.systemPrompt && !systemInstruction) {
+         systemInstruction = request.systemPrompt;
+       }
+     } else {
+       for (const msg of request.messages) {
+         if (msg.role === "system") {
+           systemInstruction = (systemInstruction || "") + (msg.content || "");
+           continue;
+         }
+         const content = messageToGeminiContent(msg);
+         if (content) {
+           contents.push(content);
+         }
+       }
+       if (request.systemPrompt) {
+         systemInstruction = request.systemPrompt;
+       }
+     }
+     if (contents.length === 0 || contents[0].role !== "user") {
+       contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
+     }
+     const mergedContents = [];
+     for (const content of contents) {
+       const last = mergedContents[mergedContents.length - 1];
+       if (last && last.role === content.role) {
+         last.parts.push(...content.parts);
+       } else {
+         mergedContents.push({ ...content, parts: [...content.parts] });
+       }
+     }
+     const tools = formatToolsForGemini2(request.actions);
+     const messageId = generateMessageId();
+     yield { type: "message:start", id: messageId };
+     try {
+       const chat = model.startChat({
+         history: mergedContents.slice(0, -1),
+         // All but the last message
+         systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
+         tools: tools ? [tools] : void 0,
+         generationConfig: {
+           temperature: request.config?.temperature ?? this.config.temperature,
+           maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
+         }
+       });
+       const lastMessage = mergedContents[mergedContents.length - 1];
+       const result = await chat.sendMessageStream(lastMessage.parts);
+       let currentToolCall = null;
+       for await (const chunk of result.stream) {
+         if (request.signal?.aborted) {
+           break;
+         }
+         const candidate = chunk.candidates?.[0];
+         if (!candidate?.content?.parts) continue;
+         for (const part of candidate.content.parts) {
+           if ("text" in part && part.text) {
+             yield { type: "message:delta", content: part.text };
+           }
+           if ("functionCall" in part && part.functionCall) {
+             const fc = part.functionCall;
+             const toolId = generateToolCallId();
+             if (currentToolCall) {
+               yield {
+                 type: "action:args",
+                 id: currentToolCall.id,
+                 args: JSON.stringify(currentToolCall.args)
+               };
+             }
+             currentToolCall = {
+               id: toolId,
+               name: fc.name,
+               args: fc.args || {}
+             };
+             yield {
+               type: "action:start",
+               id: toolId,
+               name: fc.name
+             };
+           }
+         }
+         if (candidate.finishReason) {
+           if (currentToolCall) {
+             yield {
+               type: "action:args",
+               id: currentToolCall.id,
+               args: JSON.stringify(currentToolCall.args)
+             };
+           }
+         }
+       }
+       yield { type: "message:end" };
+       yield { type: "done" };
+     } catch (error) {
+       yield {
+         type: "error",
+         message: error instanceof Error ? error.message : "Unknown error",
+         code: "GOOGLE_ERROR"
+       };
+     }
+   }
+   /**
+    * Non-streaming completion (optional, for debugging)
+    */
+   async complete(request) {
+     const client = await this.getClient();
+     const modelId = request.config?.model || this.model;
+     const model = client.getGenerativeModel({
+       model: modelId,
+       safetySettings: this.config.safetySettings
+     });
+     let contents = [];
+     let systemInstruction;
+     for (const msg of request.messages) {
+       if (msg.role === "system") {
+         systemInstruction = (systemInstruction || "") + (msg.content || "");
+         continue;
+       }
+       const content = messageToGeminiContent(msg);
+       if (content) {
+         contents.push(content);
+       }
+     }
+     if (request.systemPrompt) {
+       systemInstruction = request.systemPrompt;
+     }
+     if (contents.length === 0 || contents[0].role !== "user") {
+       contents = [{ role: "user", parts: [{ text: "" }] }, ...contents];
+     }
+     const mergedContents = [];
+     for (const content of contents) {
+       const last = mergedContents[mergedContents.length - 1];
+       if (last && last.role === content.role) {
+         last.parts.push(...content.parts);
+       } else {
+         mergedContents.push({ ...content, parts: [...content.parts] });
+       }
+     }
+     const tools = formatToolsForGemini2(request.actions);
+     const chat = model.startChat({
+       history: mergedContents.slice(0, -1),
+       systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
+       tools: tools ? [tools] : void 0,
+       generationConfig: {
+         temperature: request.config?.temperature ?? this.config.temperature,
+         maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
+       }
+     });
+     const lastMessage = mergedContents[mergedContents.length - 1];
+     const result = await chat.sendMessage(lastMessage.parts);
+     const response = result.response;
+     let textContent = "";
+     const toolCalls = [];
+     const candidate = response.candidates?.[0];
+     if (candidate?.content?.parts) {
+       for (const part of candidate.content.parts) {
+         if ("text" in part && part.text) {
+           textContent += part.text;
+         }
+         if ("functionCall" in part && part.functionCall) {
+           toolCalls.push({
+             id: generateToolCallId(),
+             name: part.functionCall.name,
+             args: part.functionCall.args || {}
+           });
+         }
+       }
+     }
+     return {
+       content: textContent,
+       toolCalls,
+       rawResponse: response
+     };
+   }
+ };
+ function createGoogleAdapter(config) {
+   return new GoogleAdapter(config);
+ }
+
+ // src/providers/google/index.ts
+ var GOOGLE_MODELS2 = {
+   // Gemini 2.0 series (latest)
+   "gemini-2.0-flash": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 8192
+   },
+   "gemini-2.0-flash-lite": {
+     vision: true,
+     tools: true,
+     audio: false,
+     video: false,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 8192
+   },
+   // Gemini 2.5 series (experimental)
+   "gemini-2.5-pro-preview-05-06": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 65536
+   },
+   "gemini-2.5-flash-preview-05-20": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 65536
+   },
+   // Gemini 1.5 series
+   "gemini-1.5-pro": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 2e6,
+     outputTokens: 8192
+   },
+   "gemini-1.5-pro-latest": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 2e6,
+     outputTokens: 8192
+   },
+   "gemini-1.5-flash": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 8192
+   },
+   "gemini-1.5-flash-latest": {
+     vision: true,
+     tools: true,
+     audio: true,
+     video: true,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 8192
+   },
+   "gemini-1.5-flash-8b": {
+     vision: true,
+     tools: true,
+     audio: false,
+     video: false,
+     pdf: true,
+     maxTokens: 1e6,
+     outputTokens: 8192
+   },
+   // Gemini 1.0 series (legacy)
+   "gemini-1.0-pro": {
+     vision: false,
+     tools: true,
+     audio: false,
+     video: false,
+     pdf: false,
+     maxTokens: 30720,
+     outputTokens: 2048
+   }
+ };
+ function createGoogle(config = {}) {
+   const apiKey = config.apiKey ?? process.env.GOOGLE_API_KEY ?? "";
+   return {
+     name: "google",
+     supportedModels: Object.keys(GOOGLE_MODELS2),
+     languageModel(modelId) {
+       return createGoogleAdapter({
+         apiKey,
+         model: modelId,
+         baseUrl: config.baseUrl,
+         safetySettings: config.safetySettings
+       });
+     },
+     getCapabilities(modelId) {
+       const model = GOOGLE_MODELS2[modelId] ?? GOOGLE_MODELS2["gemini-2.0-flash"];
+       return {
+         supportsVision: model.vision,
+         supportsTools: model.tools,
+         supportsThinking: false,
+         // Gemini doesn't have extended thinking like Claude
+         supportsStreaming: true,
+         supportsPDF: model.pdf,
+         supportsAudio: model.audio,
+         supportsVideo: model.video,
+         maxTokens: model.maxTokens,
+         supportedImageTypes: model.vision ? [
+           "image/png",
+           "image/jpeg",
+           "image/gif",
+           "image/webp",
+           "image/heic",
+           "image/heif"
+         ] : [],
+         supportedAudioTypes: model.audio ? [
+           "audio/mp3",
+           "audio/wav",
+           "audio/aiff",
+           "audio/aac",
+           "audio/ogg",
+           "audio/flac"
+         ] : [],
+         supportedVideoTypes: model.video ? [
+           "video/mp4",
+           "video/mpeg",
+           "video/mov",
+           "video/avi",
+           "video/webm",
+           "video/mkv"
+         ] : [],
+         supportsJsonMode: true,
+         // Gemini supports JSON mode
+         supportsSystemMessages: true
+       };
+     }
+   };
+ }
+ var createGoogleProvider = createGoogle;
+
+ export { createGoogle, google as createGoogleModel, createGoogleProvider, google };
+ //# sourceMappingURL=index.mjs.map
+ //# sourceMappingURL=index.mjs.map
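
For reference, a minimal sketch of driving the low-level model object returned by google() above. The parameter and result field names (messages, tools, temperature, maxTokens, text, toolCalls, finishReason, usage) are read directly from the compiled code; the import path is an assumption, and the higher-level SDK presumably wraps these calls rather than exposing them directly. It requires @google/generative-ai to be installed and GOOGLE_API_KEY (or GEMINI_API_KEY) to be set.

  // Hypothetical subpath import; confirm against package.json "exports".
  import { google } from "@yourgpt/llm-sdk/providers/google";

  const model = google("gemini-2.0-flash");

  // One-shot generation via doGenerate()
  const result = await model.doGenerate({
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Say hello in one word." }
    ],
    temperature: 0.2,
    maxTokens: 64
  });
  console.log(result.text, result.finishReason, result.usage);

  // Streaming via the doStream() async generator
  for await (const chunk of model.doStream({ messages: [{ role: "user", content: "Hi" }] })) {
    if (chunk.type === "text-delta") process.stdout.write(chunk.text);
    if (chunk.type === "finish") console.log("\n", chunk.usage);
  }

Note that GoogleAdapter.stream(), the path used by createGoogle().languageModel(), emits the copilot-sdk event shapes instead (message:start, message:delta, action:start, action:args, message:end, done), as seen in the adapter code above.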