@ai-sdk/groq 0.0.0-013d7476-20250808163325 → 0.0.0-4115c213-20260122152721

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -4,7 +4,8 @@ import {
 } from "@ai-sdk/provider";
 import {
   loadApiKey,
-  withoutTrailingSlash
+  withoutTrailingSlash,
+  withUserAgentSuffix
 } from "@ai-sdk/provider-utils";
 
 // src/groq-chat-language-model.ts
@@ -22,11 +23,52 @@ import {
 } from "@ai-sdk/provider-utils";
 import { z as z3 } from "zod/v4";
 
+// src/convert-groq-usage.ts
+function convertGroqUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  const reasoningTokens = (_d = (_c = usage.completion_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : void 0;
+  const textTokens = reasoningTokens != null ? completionTokens - reasoningTokens : completionTokens;
+  return {
+    inputTokens: {
+      total: promptTokens,
+      noCache: promptTokens,
+      cacheRead: void 0,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: completionTokens,
+      text: textTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/convert-to-groq-chat-messages.ts
 import {
   UnsupportedFunctionalityError
 } from "@ai-sdk/provider";
+import { convertToBase64 } from "@ai-sdk/provider-utils";
 function convertToGroqChatMessages(prompt) {
+  var _a;
   const messages = [];
   for (const { role, content } of prompt) {
     switch (role) {
@@ -56,7 +98,7 @@ function convertToGroqChatMessages(prompt) {
             return {
               type: "image_url",
               image_url: {
-                url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+                url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`
               }
             };
           }
@@ -67,9 +109,16 @@ function convertToGroqChatMessages(prompt) {
       }
       case "assistant": {
         let text = "";
+        let reasoning = "";
         const toolCalls = [];
         for (const part of content) {
           switch (part.type) {
+            // groq supports reasoning for tool-calls in multi-turn conversations
+            // https://github.com/vercel/ai/issues/7860
+            case "reasoning": {
+              reasoning += part.text;
+              break;
+            }
             case "text": {
               text += part.text;
               break;
@@ -90,12 +139,16 @@ function convertToGroqChatMessages(prompt) {
         messages.push({
           role: "assistant",
           content: text,
-          tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+          ...reasoning.length > 0 ? { reasoning } : null,
+          ...toolCalls.length > 0 ? { tool_calls: toolCalls } : null
         });
         break;
       }
       case "tool": {
         for (const toolResponse of content) {
+          if (toolResponse.type === "tool-approval-response") {
+            continue;
+          }
           const output = toolResponse.output;
           let contentValue;
           switch (output.type) {
@@ -103,6 +156,9 @@ function convertToGroqChatMessages(prompt) {
             case "error-text":
               contentValue = output.value;
               break;
+            case "execution-denied":
+              contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
+              break;
             case "content":
             case "json":
             case "error-json":
@@ -143,7 +199,11 @@ function getResponseMetadata({
 import { z } from "zod/v4";
 var groqProviderOptions = z.object({
   reasoningFormat: z.enum(["parsed", "raw", "hidden"]).optional(),
-  reasoningEffort: z.string().optional(),
+  /**
+   * Specifies the reasoning effort level for model inference.
+   * @see https://console.groq.com/docs/reasoning#reasoning-effort
+   */
+  reasoningEffort: z.enum(["none", "default", "low", "medium", "high"]).optional(),
   /**
    * Whether to enable parallel function calling during tool use. Default to true.
    */
@@ -158,7 +218,24 @@ var groqProviderOptions = z.object({
    *
    * @default true
    */
-  structuredOutputs: z.boolean().optional()
+  structuredOutputs: z.boolean().optional(),
+  /**
+   * Whether to use strict JSON schema validation.
+   * When true, the model uses constrained decoding to guarantee schema compliance.
+   * Only used when structured outputs are enabled and a schema is provided.
+   *
+   * @default true
+   */
+  strictJsonSchema: z.boolean().optional(),
+  /**
+   * Service tier for the request.
+   * - 'on_demand': Default tier with consistent performance and fairness
+   * - 'flex': Higher throughput tier optimized for workloads that can handle occasional request failures
+   * - 'auto': Uses on_demand rate limits, then falls back to flex tier if exceeded
+   *
+   * @default 'on_demand'
+   */
+  serviceTier: z.enum(["on_demand", "flex", "auto"]).optional()
 });
 
 // src/groq-error.ts
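For context, the options above are read through the AI SDK's per-provider options. A minimal sketch of the caller side, assuming the `ai` package's `generateText` and a reasoning-capable Groq model id (names outside this diff are illustrative, not taken from this package):

    // Sketch only: passing the new Groq provider options added in this release.
    import { generateText } from 'ai';
    import { groq } from '@ai-sdk/groq';

    const { text } = await generateText({
      model: groq('openai/gpt-oss-120b'),
      prompt: 'Summarize the release notes.',
      providerOptions: {
        groq: {
          reasoningEffort: 'low', // now an enum: none | default | low | medium | high
          serviceTier: 'flex', // new: on_demand | flex | auto
          strictJsonSchema: true, // new: constrained decoding for JSON schemas
        },
      },
    });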
@@ -179,21 +256,53 @@ var groqFailedResponseHandler = createJsonErrorResponseHandler({
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
+
+// src/groq-browser-search-models.ts
+var BROWSER_SEARCH_SUPPORTED_MODELS = [
+  "openai/gpt-oss-20b",
+  "openai/gpt-oss-120b"
+];
+function isBrowserSearchSupportedModel(modelId) {
+  return BROWSER_SEARCH_SUPPORTED_MODELS.includes(modelId);
+}
+function getSupportedModelsString() {
+  return BROWSER_SEARCH_SUPPORTED_MODELS.join(", ");
+}
+
+// src/groq-prepare-tools.ts
 function prepareTools({
   tools,
-  toolChoice
+  toolChoice,
+  modelId
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
     return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  const groqTools = [];
+  const groqTools2 = [];
   for (const tool of tools) {
-    if (tool.type === "provider-defined") {
-      toolWarnings.push({ type: "unsupported-tool", tool });
+    if (tool.type === "provider") {
+      if (tool.id === "groq.browser_search") {
+        if (!isBrowserSearchSupportedModel(modelId)) {
+          toolWarnings.push({
+            type: "unsupported",
+            feature: `provider-defined tool ${tool.id}`,
+            details: `Browser search is only supported on the following models: ${getSupportedModelsString()}. Current model: ${modelId}`
+          });
+        } else {
+          groqTools2.push({
+            type: "browser_search"
+          });
+        }
+      } else {
+        toolWarnings.push({
+          type: "unsupported",
+          feature: `provider-defined tool ${tool.id}`
+        });
+      }
     } else {
-      groqTools.push({
+      groqTools2.push({
         type: "function",
         function: {
           name: tool.name,
@@ -204,17 +313,17 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: groqTools, toolChoice: void 0, toolWarnings };
+    return { tools: groqTools2, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: groqTools, toolChoice: type, toolWarnings };
+      return { tools: groqTools2, toolChoice: type, toolWarnings };
     case "tool":
       return {
-        tools: groqTools,
+        tools: groqTools2,
         toolChoice: {
           type: "function",
           function: {
@@ -245,14 +354,14 @@ function mapGroqFinishReason(finishReason) {
     case "tool_calls":
       return "tool-calls";
     default:
-      return "unknown";
+      return "other";
   }
 }
 
 // src/groq-chat-language-model.ts
 var GroqChatLanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
     this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
@@ -278,7 +387,7 @@ var GroqChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b;
+    var _a, _b, _c;
     const warnings = [];
     const groqOptions = await parseProviderOptions({
       provider: "groq",
@@ -286,24 +395,22 @@ var GroqChatLanguageModel = class {
       schema: groqProviderOptions
     });
     const structuredOutputs = (_a = groqOptions == null ? void 0 : groqOptions.structuredOutputs) != null ? _a : true;
+    const strictJsonSchema = (_b = groqOptions == null ? void 0 : groqOptions.strictJsonSchema) != null ? _b : true;
     if (topK != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "topK"
-      });
+      warnings.push({ type: "unsupported", feature: "topK" });
     }
     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
       warnings.push({
-        type: "unsupported-setting",
-        setting: "responseFormat",
+        type: "unsupported",
+        feature: "responseFormat",
         details: "JSON response format schema is only supported with structuredOutputs"
       });
     }
     const {
-      tools: groqTools,
+      tools: groqTools2,
       toolChoice: groqToolChoice,
       toolWarnings
-    } = prepareTools({ tools, toolChoice });
+    } = prepareTools({ tools, toolChoice, modelId: this.modelId });
     return {
       args: {
         // model id:
@@ -324,24 +431,26 @@ var GroqChatLanguageModel = class {
           type: "json_schema",
           json_schema: {
             schema: responseFormat.schema,
-            name: (_b = responseFormat.name) != null ? _b : "response",
+            strict: strictJsonSchema,
+            name: (_c = responseFormat.name) != null ? _c : "response",
             description: responseFormat.description
           }
         } : { type: "json_object" } : void 0,
         // provider options:
         reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
         reasoning_effort: groqOptions == null ? void 0 : groqOptions.reasoningEffort,
+        service_tier: groqOptions == null ? void 0 : groqOptions.serviceTier,
         // messages:
         messages: convertToGroqChatMessages(prompt),
         // tools:
-        tools: groqTools,
+        tools: groqTools2,
         tool_choice: groqToolChoice
       },
       warnings: [...warnings, ...toolWarnings]
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b;
     const { args, warnings } = await this.getArgs({
       ...options,
       stream: false
@@ -390,12 +499,11 @@ var GroqChatLanguageModel = class {
     }
     return {
       content,
-      finishReason: mapGroqFinishReason(choice.finish_reason),
-      usage: {
-        inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
-        outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0,
-        totalTokens: (_g = (_f = response.usage) == null ? void 0 : _f.total_tokens) != null ? _g : void 0
+      finishReason: {
+        unified: mapGroqFinishReason(choice.finish_reason),
+        raw: (_b = choice.finish_reason) != null ? _b : void 0
       },
+      usage: convertGroqUsage(response.usage),
       response: {
         ...getResponseMetadata(response),
         headers: responseHeaders,
@@ -424,12 +532,11 @@ var GroqChatLanguageModel = class {
       fetch: this.config.fetch
     });
     const toolCalls = [];
-    let finishReason = "unknown";
-    const usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
+    let finishReason = {
+      unified: "other",
+      raw: void 0
     };
+    let usage = void 0;
     let isFirstChunk = true;
     let isActiveText = false;
     let isActiveReasoning = false;
@@ -441,18 +548,24 @@ var GroqChatLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
           if (!chunk.success) {
-            finishReason = "error";
+            finishReason = {
+              unified: "error",
+              raw: void 0
+            };
             controller.enqueue({ type: "error", error: chunk.error });
             return;
           }
           const value = chunk.value;
           if ("error" in value) {
-            finishReason = "error";
+            finishReason = {
+              unified: "error",
+              raw: void 0
+            };
             controller.enqueue({ type: "error", error: value.error });
             return;
           }
@@ -464,13 +577,14 @@ var GroqChatLanguageModel = class {
            });
           }
           if (((_a = value.x_groq) == null ? void 0 : _a.usage) != null) {
-            usage.inputTokens = (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0;
-            usage.outputTokens = (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0;
-            usage.totalTokens = (_d = value.x_groq.usage.total_tokens) != null ? _d : void 0;
+            usage = value.x_groq.usage;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
-            finishReason = mapGroqFinishReason(choice.finish_reason);
+            finishReason = {
+              unified: mapGroqFinishReason(choice.finish_reason),
+              raw: choice.finish_reason
+            };
           }
           if ((choice == null ? void 0 : choice.delta) == null) {
             return;
@@ -491,6 +605,13 @@ var GroqChatLanguageModel = class {
             });
           }
           if (delta.content != null && delta.content.length > 0) {
+            if (isActiveReasoning) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: "reasoning-0"
+              });
+              isActiveReasoning = false;
+            }
             if (!isActiveText) {
               controller.enqueue({ type: "text-start", id: "txt-0" });
               isActiveText = true;
@@ -502,6 +623,13 @@ var GroqChatLanguageModel = class {
             });
           }
           if (delta.tool_calls != null) {
+            if (isActiveReasoning) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: "reasoning-0"
+              });
+              isActiveReasoning = false;
+            }
             for (const toolCallDelta of delta.tool_calls) {
               const index = toolCallDelta.index;
               if (toolCalls[index] == null) {
@@ -517,7 +645,7 @@ var GroqChatLanguageModel = class {
                  message: `Expected 'id' to be a string.`
                });
              }
-              if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
+              if (((_b = toolCallDelta.function) == null ? void 0 : _b.name) == null) {
                throw new InvalidResponseDataError({
                  data: toolCallDelta,
                  message: `Expected 'function.name' to be a string.`
@@ -533,12 +661,12 @@ var GroqChatLanguageModel = class {
                type: "function",
                function: {
                  name: toolCallDelta.function.name,
-                  arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
+                  arguments: (_c = toolCallDelta.function.arguments) != null ? _c : ""
                },
                hasFinished: false
              };
              const toolCall2 = toolCalls[index];
-              if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
+              if (((_d = toolCall2.function) == null ? void 0 : _d.name) != null && ((_e = toolCall2.function) == null ? void 0 : _e.arguments) != null) {
                if (toolCall2.function.arguments.length > 0) {
                  controller.enqueue({
                    type: "tool-input-delta",
@@ -553,7 +681,7 @@ var GroqChatLanguageModel = class {
                });
                controller.enqueue({
                  type: "tool-call",
-                  toolCallId: (_i = toolCall2.id) != null ? _i : generateId(),
+                  toolCallId: (_f = toolCall2.id) != null ? _f : generateId(),
                  toolName: toolCall2.function.name,
                  input: toolCall2.function.arguments
                });
566
694
  if (toolCall.hasFinished) {
567
695
  continue;
568
696
  }
569
- if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
570
- toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
697
+ if (((_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null) {
698
+ toolCall.function.arguments += (_i = (_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null ? _i : "";
571
699
  }
572
700
  controller.enqueue({
573
701
  type: "tool-input-delta",
574
702
  id: toolCall.id,
575
- delta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
703
+ delta: (_j = toolCallDelta.function.arguments) != null ? _j : ""
576
704
  });
577
- if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && isParsableJson(toolCall.function.arguments)) {
705
+ if (((_k = toolCall.function) == null ? void 0 : _k.name) != null && ((_l = toolCall.function) == null ? void 0 : _l.arguments) != null && isParsableJson(toolCall.function.arguments)) {
578
706
  controller.enqueue({
579
707
  type: "tool-input-end",
580
708
  id: toolCall.id
581
709
  });
582
710
  controller.enqueue({
583
711
  type: "tool-call",
584
- toolCallId: (_p = toolCall.id) != null ? _p : generateId(),
712
+ toolCallId: (_m = toolCall.id) != null ? _m : generateId(),
585
713
  toolName: toolCall.function.name,
586
714
  input: toolCall.function.arguments
587
715
  });
@@ -600,7 +728,7 @@ var GroqChatLanguageModel = class {
          controller.enqueue({
            type: "finish",
            finishReason,
-            usage,
+            usage: convertGroqUsage(usage),
            ...providerMetadata != null ? { providerMetadata } : {}
          });
        }
@@ -638,7 +766,13 @@ var groqChatResponseSchema = z3.object({
   usage: z3.object({
     prompt_tokens: z3.number().nullish(),
     completion_tokens: z3.number().nullish(),
-    total_tokens: z3.number().nullish()
+    total_tokens: z3.number().nullish(),
+    prompt_tokens_details: z3.object({
+      cached_tokens: z3.number().nullish()
+    }).nullish(),
+    completion_tokens_details: z3.object({
+      reasoning_tokens: z3.number().nullish()
+    }).nullish()
   }).nullish()
 });
 var groqChatChunkSchema = z3.union([
@@ -671,7 +805,13 @@ var groqChatChunkSchema = z3.union([
       usage: z3.object({
         prompt_tokens: z3.number().nullish(),
         completion_tokens: z3.number().nullish(),
-        total_tokens: z3.number().nullish()
+        total_tokens: z3.number().nullish(),
+        prompt_tokens_details: z3.object({
+          cached_tokens: z3.number().nullish()
+        }).nullish(),
+        completion_tokens_details: z3.object({
+          reasoning_tokens: z3.number().nullish()
+        }).nullish()
       }).nullish()
     }).nullish()
   }),
@@ -683,6 +823,7 @@ import {
   combineHeaders as combineHeaders2,
   convertBase64ToUint8Array,
   createJsonResponseHandler as createJsonResponseHandler2,
+  mediaTypeToExtension,
   parseProviderOptions as parseProviderOptions2,
   postFormDataToApi
 } from "@ai-sdk/provider-utils";
@@ -698,7 +839,7 @@ var GroqTranscriptionModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
     this.config = config;
-    this.specificationVersion = "v2";
+    this.specificationVersion = "v3";
   }
   get provider() {
     return this.config.provider;
@@ -718,7 +859,12 @@ var GroqTranscriptionModel = class {
     const formData = new FormData();
     const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
     formData.append("model", this.modelId);
-    formData.append("file", new File([blob], "audio", { type: mediaType }));
+    const fileExtension = mediaTypeToExtension(mediaType);
+    formData.append(
+      "file",
+      new File([blob], "audio", { type: mediaType }),
+      `audio.${fileExtension}`
+    );
     if (groqOptions) {
       const transcriptionModelOptions = {
         language: (_a = groqOptions.language) != null ? _a : void 0,
@@ -730,7 +876,13 @@ var GroqTranscriptionModel = class {
       for (const key in transcriptionModelOptions) {
         const value = transcriptionModelOptions[key];
         if (value !== void 0) {
-          formData.append(key, String(value));
+          if (Array.isArray(value)) {
+            for (const item of value) {
+              formData.append(`${key}[]`, String(item));
+            }
+          } else {
+            formData.append(key, String(value));
+          }
         }
       }
     }
@@ -740,7 +892,7 @@ var GroqTranscriptionModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e;
+    var _a, _b, _c, _d, _e, _f, _g;
     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
     const { formData, warnings } = await this.getArgs(options);
     const {
@@ -768,8 +920,8 @@ var GroqTranscriptionModel = class {
         startSecond: segment.start,
         endSecond: segment.end
       }))) != null ? _e : [],
-      language: response.language,
-      durationInSeconds: response.duration,
+      language: (_f = response.language) != null ? _f : void 0,
+      durationInSeconds: (_g = response.duration) != null ? _g : void 0,
       warnings,
       response: {
         timestamp: currentDate,
@@ -781,10 +933,14 @@ var GroqTranscriptionModel = class {
   }
 };
 var groqTranscriptionResponseSchema = z4.object({
-  task: z4.string(),
-  language: z4.string(),
-  duration: z4.number(),
   text: z4.string(),
+  x_groq: z4.object({
+    id: z4.string()
+  }),
+  // additional properties are returned when `response_format: 'verbose_json'` is used
+  task: z4.string().nullish(),
+  language: z4.string().nullish(),
+  duration: z4.number().nullish(),
   segments: z4.array(
     z4.object({
       id: z4.number(),
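With the schema change above, task, language, duration, and segments are now optional, since Groq only returns them for verbose_json responses. A minimal sketch of the caller side, assuming the `ai` package's `experimental_transcribe` helper and the whisper-large-v3 model id (both are outside this diff):

    // Sketch only: language and durationInSeconds may be undefined when the
    // response does not include the verbose_json fields.
    import { experimental_transcribe as transcribe } from 'ai';
    import { groq } from '@ai-sdk/groq';
    import { readFile } from 'node:fs/promises';

    const result = await transcribe({
      model: groq.transcription('whisper-large-v3'),
      audio: await readFile('meeting.mp3'),
    });

    console.log(result.text, result.language, result.durationInSeconds);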
@@ -798,24 +954,40 @@ var groqTranscriptionResponseSchema = z4.object({
       compression_ratio: z4.number(),
       no_speech_prob: z4.number()
     })
-  ),
-  x_groq: z4.object({
-    id: z4.string()
-  })
+  ).nullish()
+});
+
+// src/tool/browser-search.ts
+import { createProviderToolFactory } from "@ai-sdk/provider-utils";
+import { z as z5 } from "zod/v4";
+var browserSearch = createProviderToolFactory({
+  id: "groq.browser_search",
+  inputSchema: z5.object({})
 });
 
+// src/groq-tools.ts
+var groqTools = {
+  browserSearch
+};
+
+// src/version.ts
+var VERSION = true ? "0.0.0-4115c213-20260122152721" : "0.0.0-test";
+
 // src/groq-provider.ts
 function createGroq(options = {}) {
   var _a;
   const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.groq.com/openai/v1";
-  const getHeaders = () => ({
-    Authorization: `Bearer ${loadApiKey({
-      apiKey: options.apiKey,
-      environmentVariableName: "GROQ_API_KEY",
-      description: "Groq"
-    })}`,
-    ...options.headers
-  });
+  const getHeaders = () => withUserAgentSuffix(
+    {
+      Authorization: `Bearer ${loadApiKey({
+        apiKey: options.apiKey,
+        environmentVariableName: "GROQ_API_KEY",
+        description: "Groq"
+      })}`,
+      ...options.headers
+    },
+    `ai-sdk/groq/${VERSION}`
+  );
   const createChatModel = (modelId) => new GroqChatLanguageModel(modelId, {
     provider: "groq.chat",
     url: ({ path }) => `${baseURL}${path}`,
@@ -841,19 +1013,25 @@ function createGroq(options = {}) {
   const provider = function(modelId) {
     return createLanguageModel(modelId);
   };
+  provider.specificationVersion = "v3";
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
-  provider.textEmbeddingModel = (modelId) => {
-    throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+  provider.embeddingModel = (modelId) => {
+    throw new NoSuchModelError({ modelId, modelType: "embeddingModel" });
   };
+  provider.textEmbeddingModel = provider.embeddingModel;
   provider.imageModel = (modelId) => {
     throw new NoSuchModelError({ modelId, modelType: "imageModel" });
   };
   provider.transcription = createTranscriptionModel;
+  provider.transcriptionModel = createTranscriptionModel;
+  provider.tools = groqTools;
   return provider;
 }
 var groq = createGroq();
 export {
+  VERSION,
+  browserSearch,
   createGroq,
   groq
 };
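The export list above also surfaces the new provider-defined browser search tool through `groq.tools`. A minimal sketch of how it might be wired up, assuming the `ai` package's `generateText`; the tool key name and empty input object are assumptions, and per the diff only the two gpt-oss models are accepted:

    // Sketch only: unsupported models produce an "unsupported" tool warning
    // instead of a browser_search tool being sent to the API.
    import { generateText } from 'ai';
    import { groq } from '@ai-sdk/groq';

    const { text } = await generateText({
      model: groq('openai/gpt-oss-120b'),
      tools: {
        browser_search: groq.tools.browserSearch({}),
      },
      prompt: 'What changed in the latest @ai-sdk/groq release?',
    });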