@ai-sdk/groq 0.0.0-013d7476-20250808163325 → 0.0.0-1c33ba03-20260114162300

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -4,7 +4,8 @@ import {
  } from "@ai-sdk/provider";
  import {
  loadApiKey,
- withoutTrailingSlash
+ withoutTrailingSlash,
+ withUserAgentSuffix
  } from "@ai-sdk/provider-utils";

  // src/groq-chat-language-model.ts
@@ -22,11 +23,50 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z as z3 } from "zod/v4";

+ // src/convert-groq-usage.ts
+ function convertGroqUsage(usage) {
+ var _a, _b;
+ if (usage == null) {
+ return {
+ inputTokens: {
+ total: void 0,
+ noCache: void 0,
+ cacheRead: void 0,
+ cacheWrite: void 0
+ },
+ outputTokens: {
+ total: void 0,
+ text: void 0,
+ reasoning: void 0
+ },
+ raw: void 0
+ };
+ }
+ const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+ const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+ return {
+ inputTokens: {
+ total: promptTokens,
+ noCache: promptTokens,
+ cacheRead: void 0,
+ cacheWrite: void 0
+ },
+ outputTokens: {
+ total: completionTokens,
+ text: completionTokens,
+ reasoning: void 0
+ },
+ raw: usage
+ };
+ }
+
  // src/convert-to-groq-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToGroqChatMessages(prompt) {
+ var _a;
  const messages = [];
  for (const { role, content } of prompt) {
  switch (role) {
@@ -56,7 +96,7 @@ function convertToGroqChatMessages(prompt) {
  return {
  type: "image_url",
  image_url: {
- url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${convertToBase64(part.data)}`
  }
  };
  }
@@ -67,9 +107,16 @@ function convertToGroqChatMessages(prompt) {
  }
  case "assistant": {
  let text = "";
+ let reasoning = "";
  const toolCalls = [];
  for (const part of content) {
  switch (part.type) {
+ // groq supports reasoning for tool-calls in multi-turn conversations
+ // https://github.com/vercel/ai/issues/7860
+ case "reasoning": {
+ reasoning += part.text;
+ break;
+ }
  case "text": {
  text += part.text;
  break;
@@ -90,12 +137,16 @@ function convertToGroqChatMessages(prompt) {
  messages.push({
  role: "assistant",
  content: text,
- tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+ ...reasoning.length > 0 ? { reasoning } : null,
+ ...toolCalls.length > 0 ? { tool_calls: toolCalls } : null
  });
  break;
  }
  case "tool": {
  for (const toolResponse of content) {
+ if (toolResponse.type === "tool-approval-response") {
+ continue;
+ }
  const output = toolResponse.output;
  let contentValue;
  switch (output.type) {
@@ -103,6 +154,9 @@ function convertToGroqChatMessages(prompt) {
  case "error-text":
  contentValue = output.value;
  break;
+ case "execution-denied":
+ contentValue = (_a = output.reason) != null ? _a : "Tool execution denied.";
+ break;
  case "content":
  case "json":
  case "error-json":
@@ -143,7 +197,11 @@ function getResponseMetadata({
  import { z } from "zod/v4";
  var groqProviderOptions = z.object({
  reasoningFormat: z.enum(["parsed", "raw", "hidden"]).optional(),
- reasoningEffort: z.string().optional(),
+ /**
+ * Specifies the reasoning effort level for model inference.
+ * @see https://console.groq.com/docs/reasoning#reasoning-effort
+ */
+ reasoningEffort: z.enum(["none", "default", "low", "medium", "high"]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
@@ -158,7 +216,24 @@ var groqProviderOptions = z.object({
  *
  * @default true
  */
- structuredOutputs: z.boolean().optional()
+ structuredOutputs: z.boolean().optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ * When true, the model uses constrained decoding to guarantee schema compliance.
+ * Only used when structured outputs are enabled and a schema is provided.
+ *
+ * @default true
+ */
+ strictJsonSchema: z.boolean().optional(),
+ /**
+ * Service tier for the request.
+ * - 'on_demand': Default tier with consistent performance and fairness
+ * - 'flex': Higher throughput tier optimized for workloads that can handle occasional request failures
+ * - 'auto': Uses on_demand rate limits, then falls back to flex tier if exceeded
+ *
+ * @default 'on_demand'
+ */
+ serviceTier: z.enum(["on_demand", "flex", "auto"]).optional()
  });

  // src/groq-error.ts
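Note on the hunk above: reasoningEffort is now validated as an enum, and strictJsonSchema and serviceTier are new provider options. A minimal usage sketch (not part of the diff), assuming the AI SDK's generateText call shape and the groq provider exported by this package:

  import { generateText } from 'ai';
  import { groq } from '@ai-sdk/groq';

  const { text } = await generateText({
    model: groq('openai/gpt-oss-20b'),
    prompt: 'Summarize the flex service tier in one sentence.',
    providerOptions: {
      groq: {
        reasoningEffort: 'low', // must be one of the enum values added above
        serviceTier: 'flex', // 'on_demand' | 'flex' | 'auto'
        strictJsonSchema: true // only applies when structured outputs and a schema are used
      }
    }
  });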
@@ -179,21 +254,53 @@ var groqFailedResponseHandler = createJsonErrorResponseHandler({
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
+
+ // src/groq-browser-search-models.ts
+ var BROWSER_SEARCH_SUPPORTED_MODELS = [
+ "openai/gpt-oss-20b",
+ "openai/gpt-oss-120b"
+ ];
+ function isBrowserSearchSupportedModel(modelId) {
+ return BROWSER_SEARCH_SUPPORTED_MODELS.includes(modelId);
+ }
+ function getSupportedModelsString() {
+ return BROWSER_SEARCH_SUPPORTED_MODELS.join(", ");
+ }
+
+ // src/groq-prepare-tools.ts
  function prepareTools({
  tools,
- toolChoice
+ toolChoice,
+ modelId
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
  return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const groqTools = [];
+ const groqTools2 = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
+ if (tool.type === "provider") {
+ if (tool.id === "groq.browser_search") {
+ if (!isBrowserSearchSupportedModel(modelId)) {
+ toolWarnings.push({
+ type: "unsupported",
+ feature: `provider-defined tool ${tool.id}`,
+ details: `Browser search is only supported on the following models: ${getSupportedModelsString()}. Current model: ${modelId}`
+ });
+ } else {
+ groqTools2.push({
+ type: "browser_search"
+ });
+ }
+ } else {
+ toolWarnings.push({
+ type: "unsupported",
+ feature: `provider-defined tool ${tool.id}`
+ });
+ }
  } else {
- groqTools.push({
+ groqTools2.push({
  type: "function",
  function: {
  name: tool.name,
@@ -204,17 +311,17 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: groqTools, toolChoice: void 0, toolWarnings };
+ return { tools: groqTools2, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: groqTools, toolChoice: type, toolWarnings };
+ return { tools: groqTools2, toolChoice: type, toolWarnings };
  case "tool":
  return {
- tools: groqTools,
+ tools: groqTools2,
  toolChoice: {
  type: "function",
  function: {
@@ -245,14 +352,14 @@ function mapGroqFinishReason(finishReason) {
  case "tool_calls":
  return "tool-calls";
  default:
- return "unknown";
+ return "other";
  }
  }

  // src/groq-chat-language-model.ts
  var GroqChatLanguageModel = class {
  constructor(modelId, config) {
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  this.supportedUrls = {
  "image/*": [/^https?:\/\/.*$/]
  };
@@ -278,7 +385,7 @@ var GroqChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b;
+ var _a, _b, _c;
  const warnings = [];
  const groqOptions = await parseProviderOptions({
  provider: "groq",
@@ -286,24 +393,22 @@ var GroqChatLanguageModel = class {
  schema: groqProviderOptions
  });
  const structuredOutputs = (_a = groqOptions == null ? void 0 : groqOptions.structuredOutputs) != null ? _a : true;
+ const strictJsonSchema = (_b = groqOptions == null ? void 0 : groqOptions.strictJsonSchema) != null ? _b : true;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported", feature: "topK" });
  }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
- type: "unsupported-setting",
- setting: "responseFormat",
+ type: "unsupported",
+ feature: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
  const {
- tools: groqTools,
+ tools: groqTools2,
  toolChoice: groqToolChoice,
  toolWarnings
- } = prepareTools({ tools, toolChoice });
+ } = prepareTools({ tools, toolChoice, modelId: this.modelId });
  return {
  args: {
  // model id:
@@ -324,24 +429,26 @@ var GroqChatLanguageModel = class {
  type: "json_schema",
  json_schema: {
  schema: responseFormat.schema,
- name: (_b = responseFormat.name) != null ? _b : "response",
+ strict: strictJsonSchema,
+ name: (_c = responseFormat.name) != null ? _c : "response",
  description: responseFormat.description
  }
  } : { type: "json_object" } : void 0,
  // provider options:
  reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
  reasoning_effort: groqOptions == null ? void 0 : groqOptions.reasoningEffort,
+ service_tier: groqOptions == null ? void 0 : groqOptions.serviceTier,
  // messages:
  messages: convertToGroqChatMessages(prompt),
  // tools:
- tools: groqTools,
+ tools: groqTools2,
  tool_choice: groqToolChoice
  },
  warnings: [...warnings, ...toolWarnings]
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b;
  const { args, warnings } = await this.getArgs({
  ...options,
  stream: false
@@ -390,12 +497,11 @@ var GroqChatLanguageModel = class {
  }
  return {
  content,
- finishReason: mapGroqFinishReason(choice.finish_reason),
- usage: {
- inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
- outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0,
- totalTokens: (_g = (_f = response.usage) == null ? void 0 : _f.total_tokens) != null ? _g : void 0
+ finishReason: {
+ unified: mapGroqFinishReason(choice.finish_reason),
+ raw: (_b = choice.finish_reason) != null ? _b : void 0
  },
+ usage: convertGroqUsage(response.usage),
  response: {
  ...getResponseMetadata(response),
  headers: responseHeaders,
@@ -424,12 +530,11 @@ var GroqChatLanguageModel = class {
  fetch: this.config.fetch
  });
  const toolCalls = [];
- let finishReason = "unknown";
- const usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
+ let finishReason = {
+ unified: "other",
+ raw: void 0
  };
+ let usage = void 0;
  let isFirstChunk = true;
  let isActiveText = false;
  let isActiveReasoning = false;
@@ -441,18 +546,24 @@ var GroqChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
  if (!chunk.success) {
- finishReason = "error";
+ finishReason = {
+ unified: "error",
+ raw: void 0
+ };
  controller.enqueue({ type: "error", error: chunk.error });
  return;
  }
  const value = chunk.value;
  if ("error" in value) {
- finishReason = "error";
+ finishReason = {
+ unified: "error",
+ raw: void 0
+ };
  controller.enqueue({ type: "error", error: value.error });
  return;
  }
@@ -464,13 +575,14 @@ var GroqChatLanguageModel = class {
  });
  }
  if (((_a = value.x_groq) == null ? void 0 : _a.usage) != null) {
- usage.inputTokens = (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0;
- usage.outputTokens = (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0;
- usage.totalTokens = (_d = value.x_groq.usage.total_tokens) != null ? _d : void 0;
+ usage = value.x_groq.usage;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
- finishReason = mapGroqFinishReason(choice.finish_reason);
+ finishReason = {
+ unified: mapGroqFinishReason(choice.finish_reason),
+ raw: choice.finish_reason
+ };
  }
  if ((choice == null ? void 0 : choice.delta) == null) {
  return;
@@ -491,6 +603,13 @@ var GroqChatLanguageModel = class {
  });
  }
  if (delta.content != null && delta.content.length > 0) {
+ if (isActiveReasoning) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: "reasoning-0"
+ });
+ isActiveReasoning = false;
+ }
  if (!isActiveText) {
  controller.enqueue({ type: "text-start", id: "txt-0" });
  isActiveText = true;
@@ -502,6 +621,13 @@ var GroqChatLanguageModel = class {
  });
  }
  if (delta.tool_calls != null) {
+ if (isActiveReasoning) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: "reasoning-0"
+ });
+ isActiveReasoning = false;
+ }
  for (const toolCallDelta of delta.tool_calls) {
  const index = toolCallDelta.index;
  if (toolCalls[index] == null) {
@@ -517,7 +643,7 @@ var GroqChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
+ if (((_b = toolCallDelta.function) == null ? void 0 : _b.name) == null) {
  throw new InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -533,12 +659,12 @@ var GroqChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
+ arguments: (_c = toolCallDelta.function.arguments) != null ? _c : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
+ if (((_d = toolCall2.function) == null ? void 0 : _d.name) != null && ((_e = toolCall2.function) == null ? void 0 : _e.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-input-delta",
@@ -553,7 +679,7 @@ var GroqChatLanguageModel = class {
  });
  controller.enqueue({
  type: "tool-call",
- toolCallId: (_i = toolCall2.id) != null ? _i : generateId(),
+ toolCallId: (_f = toolCall2.id) != null ? _f : generateId(),
  toolName: toolCall2.function.name,
  input: toolCall2.function.arguments
  });
@@ -566,22 +692,22 @@ var GroqChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
- toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
+ if (((_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null) {
+ toolCall.function.arguments += (_i = (_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null ? _i : "";
  }
  controller.enqueue({
  type: "tool-input-delta",
  id: toolCall.id,
- delta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
+ delta: (_j = toolCallDelta.function.arguments) != null ? _j : ""
  });
- if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ if (((_k = toolCall.function) == null ? void 0 : _k.name) != null && ((_l = toolCall.function) == null ? void 0 : _l.arguments) != null && isParsableJson(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-input-end",
  id: toolCall.id
  });
  controller.enqueue({
  type: "tool-call",
- toolCallId: (_p = toolCall.id) != null ? _p : generateId(),
+ toolCallId: (_m = toolCall.id) != null ? _m : generateId(),
  toolName: toolCall.function.name,
  input: toolCall.function.arguments
  });
@@ -600,7 +726,7 @@ var GroqChatLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage,
+ usage: convertGroqUsage(usage),
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
@@ -638,7 +764,10 @@ var groqChatResponseSchema = z3.object({
  usage: z3.object({
  prompt_tokens: z3.number().nullish(),
  completion_tokens: z3.number().nullish(),
- total_tokens: z3.number().nullish()
+ total_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
+ }).nullish()
  }).nullish()
  });
  var groqChatChunkSchema = z3.union([
@@ -671,7 +800,10 @@ var groqChatChunkSchema = z3.union([
  usage: z3.object({
  prompt_tokens: z3.number().nullish(),
  completion_tokens: z3.number().nullish(),
- total_tokens: z3.number().nullish()
+ total_tokens: z3.number().nullish(),
+ prompt_tokens_details: z3.object({
+ cached_tokens: z3.number().nullish()
+ }).nullish()
  }).nullish()
  }).nullish()
  }),
@@ -683,6 +815,7 @@ import {
  combineHeaders as combineHeaders2,
  convertBase64ToUint8Array,
  createJsonResponseHandler as createJsonResponseHandler2,
+ mediaTypeToExtension,
  parseProviderOptions as parseProviderOptions2,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
@@ -698,7 +831,7 @@ var GroqTranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v2";
+ this.specificationVersion = "v3";
  }
  get provider() {
  return this.config.provider;
@@ -718,7 +851,12 @@ var GroqTranscriptionModel = class {
  const formData = new FormData();
  const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
  formData.append("model", this.modelId);
- formData.append("file", new File([blob], "audio", { type: mediaType }));
+ const fileExtension = mediaTypeToExtension(mediaType);
+ formData.append(
+ "file",
+ new File([blob], "audio", { type: mediaType }),
+ `audio.${fileExtension}`
+ );
  if (groqOptions) {
  const transcriptionModelOptions = {
  language: (_a = groqOptions.language) != null ? _a : void 0,
@@ -740,7 +878,7 @@ var GroqTranscriptionModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
  const { formData, warnings } = await this.getArgs(options);
  const {
@@ -768,8 +906,8 @@ var GroqTranscriptionModel = class {
  startSecond: segment.start,
  endSecond: segment.end
  }))) != null ? _e : [],
- language: response.language,
- durationInSeconds: response.duration,
+ language: (_f = response.language) != null ? _f : void 0,
+ durationInSeconds: (_g = response.duration) != null ? _g : void 0,
  warnings,
  response: {
  timestamp: currentDate,
@@ -781,10 +919,14 @@ var GroqTranscriptionModel = class {
  }
  };
  var groqTranscriptionResponseSchema = z4.object({
- task: z4.string(),
- language: z4.string(),
- duration: z4.number(),
  text: z4.string(),
+ x_groq: z4.object({
+ id: z4.string()
+ }),
+ // additional properties are returned when `response_format: 'verbose_json'` is
+ task: z4.string().nullish(),
+ language: z4.string().nullish(),
+ duration: z4.number().nullish(),
  segments: z4.array(
  z4.object({
  id: z4.number(),
@@ -798,24 +940,40 @@ var groqTranscriptionResponseSchema = z4.object({
  compression_ratio: z4.number(),
  no_speech_prob: z4.number()
  })
- ),
- x_groq: z4.object({
- id: z4.string()
- })
+ ).nullish()
+ });
+
+ // src/tool/browser-search.ts
+ import { createProviderToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z5 } from "zod/v4";
+ var browserSearch = createProviderToolFactory({
+ id: "groq.browser_search",
+ inputSchema: z5.object({})
  });

+ // src/groq-tools.ts
+ var groqTools = {
+ browserSearch
+ };
+
+ // src/version.ts
+ var VERSION = true ? "0.0.0-1c33ba03-20260114162300" : "0.0.0-test";
+
  // src/groq-provider.ts
  function createGroq(options = {}) {
  var _a;
  const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.groq.com/openai/v1";
- const getHeaders = () => ({
- Authorization: `Bearer ${loadApiKey({
- apiKey: options.apiKey,
- environmentVariableName: "GROQ_API_KEY",
- description: "Groq"
- })}`,
- ...options.headers
- });
+ const getHeaders = () => withUserAgentSuffix(
+ {
+ Authorization: `Bearer ${loadApiKey({
+ apiKey: options.apiKey,
+ environmentVariableName: "GROQ_API_KEY",
+ description: "Groq"
+ })}`,
+ ...options.headers
+ },
+ `ai-sdk/groq/${VERSION}`
+ );
  const createChatModel = (modelId) => new GroqChatLanguageModel(modelId, {
  provider: "groq.chat",
  url: ({ path }) => `${baseURL}${path}`,
@@ -841,19 +999,25 @@ function createGroq(options = {}) {
  const provider = function(modelId) {
  return createLanguageModel(modelId);
  };
+ provider.specificationVersion = "v3";
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
- provider.textEmbeddingModel = (modelId) => {
- throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+ provider.embeddingModel = (modelId) => {
+ throw new NoSuchModelError({ modelId, modelType: "embeddingModel" });
  };
+ provider.textEmbeddingModel = provider.embeddingModel;
  provider.imageModel = (modelId) => {
  throw new NoSuchModelError({ modelId, modelType: "imageModel" });
  };
  provider.transcription = createTranscriptionModel;
+ provider.transcriptionModel = createTranscriptionModel;
+ provider.tools = groqTools;
  return provider;
  }
  var groq = createGroq();
  export {
+ VERSION,
+ browserSearch,
  createGroq,
  groq
  };
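Note on the later hunks: the provider now exposes a provider-defined browser search tool (browserSearch, groqTools, provider.tools) and reports specificationVersion v3. A minimal sketch of enabling the tool (not part of the diff), assuming the AI SDK's generateText tools option; the 'browser_search' key name is an assumption based on the tool id groq.browser_search:

  import { generateText } from 'ai';
  import { groq } from '@ai-sdk/groq';

  const result = await generateText({
    // browser search is only forwarded for the models listed in
    // BROWSER_SEARCH_SUPPORTED_MODELS; other model ids get an "unsupported" tool warning.
    model: groq('openai/gpt-oss-120b'),
    tools: {
      browser_search: groq.tools.browserSearch({})
    },
    prompt: 'Find recent changes to Groq service tiers.'
  });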