@ai-sdk/groq 2.0.0-alpha.9 → 2.0.0-beta.2

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,91 @@
  # @ai-sdk/groq

+ ## 2.0.0-beta.2
+
+ ### Patch Changes
+
+ - d1a034f: feature: using Zod 4 for internal stuff
+ - Updated dependencies [0571b98]
+ - Updated dependencies [39a4fab]
+ - Updated dependencies [d1a034f]
+ - @ai-sdk/provider-utils@3.0.0-beta.2
+
+ ## 2.0.0-beta.1
+
+ ### Patch Changes
+
+ - Updated dependencies [742b7be]
+ - Updated dependencies [7cddb72]
+ - Updated dependencies [ccce59b]
+ - Updated dependencies [e2b9e4b]
+ - Updated dependencies [45c1ea2]
+ - Updated dependencies [e025824]
+ - Updated dependencies [0d06df6]
+ - Updated dependencies [472524a]
+ - Updated dependencies [dd3ff01]
+ - Updated dependencies [7435eb5]
+ - Updated dependencies [cb68df0]
+ - Updated dependencies [bfdca8d]
+ - Updated dependencies [44f4aba]
+ - Updated dependencies [023ba40]
+ - Updated dependencies [5e57fae]
+ - Updated dependencies [71f938d]
+ - Updated dependencies [28a5ed5]
+ - @ai-sdk/provider@2.0.0-beta.1
+ - @ai-sdk/provider-utils@3.0.0-beta.1
+
+ ## 2.0.0-alpha.15
+
+ ### Patch Changes
+
+ - Updated dependencies [48d257a]
+ - Updated dependencies [8ba77a7]
+ - @ai-sdk/provider@2.0.0-alpha.15
+ - @ai-sdk/provider-utils@3.0.0-alpha.15
+
+ ## 2.0.0-alpha.14
+
+ ### Patch Changes
+
+ - Updated dependencies [b5da06a]
+ - Updated dependencies [63f9e9b]
+ - Updated dependencies [2e13791]
+ - @ai-sdk/provider@2.0.0-alpha.14
+ - @ai-sdk/provider-utils@3.0.0-alpha.14
+
+ ## 2.0.0-alpha.13
+
+ ### Patch Changes
+
+ - Updated dependencies [68ecf2f]
+ - @ai-sdk/provider@2.0.0-alpha.13
+ - @ai-sdk/provider-utils@3.0.0-alpha.13
+
+ ## 2.0.0-alpha.12
+
+ ### Patch Changes
+
+ - e2aceaf: feat: add raw chunk support
+ - Updated dependencies [e2aceaf]
+ - @ai-sdk/provider@2.0.0-alpha.12
+ - @ai-sdk/provider-utils@3.0.0-alpha.12
+
+ ## 2.0.0-alpha.11
+
+ ### Patch Changes
+
+ - Updated dependencies [c1e6647]
+ - @ai-sdk/provider@2.0.0-alpha.11
+ - @ai-sdk/provider-utils@3.0.0-alpha.11
+
+ ## 2.0.0-alpha.10
+
+ ### Patch Changes
+
+ - Updated dependencies [c4df419]
+ - @ai-sdk/provider@2.0.0-alpha.10
+ - @ai-sdk/provider-utils@3.0.0-alpha.10
+
  ## 2.0.0-alpha.9

  ### Patch Changes
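The `2.0.0-alpha.12` entry ("add raw chunk support") corresponds to the `includeRawChunks` handling added to `dist/index.js` further down in this diff. A minimal sketch of how a consumer might surface those raw Groq chunks; `streamText`, its `includeRawChunks` flag, and the `raw` part type in `fullStream` are assumptions about the AI SDK 5 beta API, not something this diff shows:

```ts
import { createGroq } from '@ai-sdk/groq';
import { streamText } from 'ai';

const groq = createGroq({ apiKey: process.env.GROQ_API_KEY });

async function main() {
  const result = streamText({
    model: groq('llama-3.3-70b-versatile'),
    prompt: 'Explain LPUs in one paragraph.',
    // Assumption: this flag is forwarded to the provider, which then emits
    // { type: 'raw', rawValue } parts, as the index.js hunks below show.
    includeRawChunks: true,
  });

  for await (const part of result.fullStream) {
    if (part.type === 'raw') {
      // The unparsed chunk exactly as Groq sent it; useful for debugging.
      console.log(part.rawValue);
    }
  }
}

main();
```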
package/dist/index.d.mts CHANGED
@@ -1,28 +1,17 @@
- import { ProviderV2, LanguageModelV2, TranscriptionModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, TranscriptionModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

  type GroqChatModelId = 'gemma2-9b-it' | 'llama-3.3-70b-versatile' | 'llama-3.1-8b-instant' | 'llama-guard-3-8b' | 'llama3-70b-8192' | 'llama3-8b-8192' | 'mixtral-8x7b-32768' | 'meta-llama/llama-4-scout-17b-16e-instruct' | 'qwen-qwq-32b' | 'mistral-saba-24b' | 'qwen-2.5-32b' | 'deepseek-r1-distill-qwen-32b' | 'deepseek-r1-distill-llama-70b' | (string & {});
  declare const groqProviderOptions: z.ZodObject<{
- reasoningFormat: z.ZodOptional<z.ZodEnum<["parsed", "raw", "hidden"]>>;
- /**
- * Whether to enable parallel function calling during tool use. Default to true.
- */
+ reasoningFormat: z.ZodOptional<z.ZodEnum<{
+ parsed: "parsed";
+ raw: "raw";
+ hidden: "hidden";
+ }>>;
  parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to
- * monitor and detect abuse. Learn more.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- reasoningFormat?: "parsed" | "raw" | "hidden" | undefined;
- parallelToolCalls?: boolean | undefined;
- }, {
- user?: string | undefined;
- reasoningFormat?: "parsed" | "raw" | "hidden" | undefined;
- parallelToolCalls?: boolean | undefined;
- }>;
+ }, z.core.$strip>;
  type GroqProviderOptions = z.infer<typeof groqProviderOptions>;

  type GroqTranscriptionModelId = 'whisper-large-v3-turbo' | 'distil-whisper-large-v3-en' | 'whisper-large-v3' | (string & {});
@@ -39,7 +28,7 @@ interface GroqProvider extends ProviderV2 {
  /**
  Creates a model for transcription.
  */
- transcription(modelId: GroqTranscriptionModelId): TranscriptionModelV1;
+ transcription(modelId: GroqTranscriptionModelId): TranscriptionModelV2;
  }
  interface GroqProviderSettings {
  /**
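The declaration changes above (and the identical ones in `index.d.ts` below) come from regenerating the types with Zod 4: `z.enum([...])` is now typed over an object of literals, and the separate `"strip"` / input / output type parameters collapse into `z.core.$strip`. The runtime schema and the inferred `GroqProviderOptions` type are unchanged; a standalone sketch for comparison (hypothetical file, not part of the package):

```ts
import { z } from 'zod/v4';

// Same shape as the package's groqProviderOptions, rebuilt standalone.
const groqProviderOptions = z.object({
  reasoningFormat: z.enum(['parsed', 'raw', 'hidden']).optional(),
  parallelToolCalls: z.boolean().optional(),
  user: z.string().optional(),
});

// Inferred type matches the Zod 3 build:
// { reasoningFormat?: 'parsed' | 'raw' | 'hidden'; parallelToolCalls?: boolean; user?: string }
type GroqProviderOptions = z.infer<typeof groqProviderOptions>;

const parsed: GroqProviderOptions = groqProviderOptions.parse({
  reasoningFormat: 'parsed',
  parallelToolCalls: true,
});
```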
package/dist/index.d.ts CHANGED
@@ -1,28 +1,17 @@
- import { ProviderV2, LanguageModelV2, TranscriptionModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, TranscriptionModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { z } from 'zod';
+ import { z } from 'zod/v4';

  type GroqChatModelId = 'gemma2-9b-it' | 'llama-3.3-70b-versatile' | 'llama-3.1-8b-instant' | 'llama-guard-3-8b' | 'llama3-70b-8192' | 'llama3-8b-8192' | 'mixtral-8x7b-32768' | 'meta-llama/llama-4-scout-17b-16e-instruct' | 'qwen-qwq-32b' | 'mistral-saba-24b' | 'qwen-2.5-32b' | 'deepseek-r1-distill-qwen-32b' | 'deepseek-r1-distill-llama-70b' | (string & {});
  declare const groqProviderOptions: z.ZodObject<{
- reasoningFormat: z.ZodOptional<z.ZodEnum<["parsed", "raw", "hidden"]>>;
- /**
- * Whether to enable parallel function calling during tool use. Default to true.
- */
+ reasoningFormat: z.ZodOptional<z.ZodEnum<{
+ parsed: "parsed";
+ raw: "raw";
+ hidden: "hidden";
+ }>>;
  parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to
- * monitor and detect abuse. Learn more.
- */
  user: z.ZodOptional<z.ZodString>;
- }, "strip", z.ZodTypeAny, {
- user?: string | undefined;
- reasoningFormat?: "parsed" | "raw" | "hidden" | undefined;
- parallelToolCalls?: boolean | undefined;
- }, {
- user?: string | undefined;
- reasoningFormat?: "parsed" | "raw" | "hidden" | undefined;
- parallelToolCalls?: boolean | undefined;
- }>;
+ }, z.core.$strip>;
  type GroqProviderOptions = z.infer<typeof groqProviderOptions>;

  type GroqTranscriptionModelId = 'whisper-large-v3-turbo' | 'distil-whisper-large-v3-en' | 'whisper-large-v3' | (string & {});
@@ -39,7 +28,7 @@ interface GroqProvider extends ProviderV2 {
  /**
  Creates a model for transcription.
  */
- transcription(modelId: GroqTranscriptionModelId): TranscriptionModelV1;
+ transcription(modelId: GroqTranscriptionModelId): TranscriptionModelV2;
  }
  interface GroqProviderSettings {
  /**
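For context on how the options typed above reach the provider, here is a hedged usage sketch assuming the AI SDK 5 beta's `generateText` and its `providerOptions` bag (neither is part of this diff); the option names and values come from `groqProviderOptions`:

```ts
import { groq } from '@ai-sdk/groq';
import { generateText } from 'ai';

async function main() {
  // Assumption: the AI SDK 5 beta exposes per-provider options under
  // providerOptions.groq; the keys below mirror groqProviderOptions.
  const { text } = await generateText({
    model: groq('deepseek-r1-distill-llama-70b'),
    prompt: 'Why is the sky blue?',
    providerOptions: {
      groq: {
        reasoningFormat: 'parsed', // 'parsed' | 'raw' | 'hidden'
        parallelToolCalls: true,
        user: 'user-1234',
      },
    },
  });
  console.log(text);
}

main();
```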
package/dist/index.js CHANGED
@@ -32,7 +32,7 @@ var import_provider_utils4 = require("@ai-sdk/provider-utils");
  // src/groq-chat-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_v43 = require("zod/v4");

  // src/convert-to-groq-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
@@ -90,7 +90,7 @@ function convertToGroqChatMessages(prompt) {
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -106,10 +106,23 @@ function convertToGroqChatMessages(prompt) {
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
@@ -137,27 +150,27 @@ function getResponseMetadata({
  }

  // src/groq-chat-options.ts
- var import_zod = require("zod");
- var groqProviderOptions = import_zod.z.object({
- reasoningFormat: import_zod.z.enum(["parsed", "raw", "hidden"]).optional(),
+ var import_v4 = require("zod/v4");
+ var groqProviderOptions = import_v4.z.object({
+ reasoningFormat: import_v4.z.enum(["parsed", "raw", "hidden"]).optional(),
  /**
  * Whether to enable parallel function calling during tool use. Default to true.
  */
- parallelToolCalls: import_zod.z.boolean().optional(),
+ parallelToolCalls: import_v4.z.boolean().optional(),
  /**
  * A unique identifier representing your end-user, which can help OpenAI to
  * monitor and detect abuse. Learn more.
  */
- user: import_zod.z.string().optional()
+ user: import_v4.z.string().optional()
  });

  // src/groq-error.ts
- var import_zod2 = require("zod");
+ var import_v42 = require("zod/v4");
  var import_provider_utils = require("@ai-sdk/provider-utils");
- var groqErrorDataSchema = import_zod2.z.object({
- error: import_zod2.z.object({
- message: import_zod2.z.string(),
- type: import_zod2.z.string()
+ var groqErrorDataSchema = import_v42.z.object({
+ error: import_v42.z.object({
+ message: import_v42.z.string(),
+ type: import_v42.z.string()
  })
  });
  var groqFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -186,7 +199,7 @@ function prepareTools({
  function: {
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters
+ parameters: tool.inputSchema
  }
  });
  }
@@ -363,10 +376,9 @@ var GroqChatLanguageModel = class {
  for (const toolCall of choice.message.tool_calls) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils2.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  }
@@ -413,6 +425,8 @@ var GroqChatLanguageModel = class {
  totalTokens: void 0
  };
  let isFirstChunk = true;
+ let isActiveText = false;
+ let isActiveReasoning = false;
  let providerMetadata;
  return {
  stream: response.pipeThrough(
@@ -422,6 +436,9 @@ var GroqChatLanguageModel = class {
  },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -454,15 +471,28 @@ var GroqChatLanguageModel = class {
  }
  const delta = choice.delta;
  if (delta.reasoning != null && delta.reasoning.length > 0) {
+ if (!isActiveReasoning) {
+ controller.enqueue({
+ type: "reasoning-start",
+ id: "reasoning-0"
+ });
+ isActiveReasoning = true;
+ }
  controller.enqueue({
- type: "reasoning",
- text: delta.reasoning
+ type: "reasoning-delta",
+ id: "reasoning-0",
+ delta: delta.reasoning
  });
  }
  if (delta.content != null && delta.content.length > 0) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "txt-0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "txt-0",
+ delta: delta.content
  });
  }
  if (delta.tool_calls != null) {
@@ -487,6 +517,11 @@ var GroqChatLanguageModel = class {
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
@@ -500,20 +535,21 @@ var GroqChatLanguageModel = class {
  if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
  if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_i = toolCall2.id) != null ? _i : (0, import_provider_utils2.generateId)(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -528,19 +564,20 @@ var GroqChatLanguageModel = class {
  toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
  });
  if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_p = toolCall.id) != null ? _p : (0, import_provider_utils2.generateId)(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -548,6 +585,12 @@ var GroqChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveReasoning) {
+ controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
+ }
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "txt-0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -562,67 +605,67 @@ var GroqChatLanguageModel = class {
  };
  }
  };
- var groqChatResponseSchema = import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- message: import_zod3.z.object({
- content: import_zod3.z.string().nullish(),
- reasoning: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function"),
- function: import_zod3.z.object({
- name: import_zod3.z.string(),
- arguments: import_zod3.z.string()
+ var groqChatResponseSchema = import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ message: import_v43.z.object({
+ content: import_v43.z.string().nullish(),
+ reasoning: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ type: import_v43.z.literal("function"),
+ function: import_v43.z.object({
+ name: import_v43.z.string(),
+ arguments: import_v43.z.string()
  })
  })
  ).nullish()
  }),
- index: import_zod3.z.number(),
- finish_reason: import_zod3.z.string().nullish()
+ index: import_v43.z.number(),
+ finish_reason: import_v43.z.string().nullish()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number().nullish(),
- completion_tokens: import_zod3.z.number().nullish(),
- total_tokens: import_zod3.z.number().nullish()
+ usage: import_v43.z.object({
+ prompt_tokens: import_v43.z.number().nullish(),
+ completion_tokens: import_v43.z.number().nullish(),
+ total_tokens: import_v43.z.number().nullish()
  }).nullish()
  });
- var groqChatChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().nullish(),
- created: import_zod3.z.number().nullish(),
- model: import_zod3.z.string().nullish(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- delta: import_zod3.z.object({
- content: import_zod3.z.string().nullish(),
- reasoning: import_zod3.z.string().nullish(),
- tool_calls: import_zod3.z.array(
- import_zod3.z.object({
- index: import_zod3.z.number(),
- id: import_zod3.z.string().nullish(),
- type: import_zod3.z.literal("function").optional(),
- function: import_zod3.z.object({
- name: import_zod3.z.string().nullish(),
- arguments: import_zod3.z.string().nullish()
+ var groqChatChunkSchema = import_v43.z.union([
+ import_v43.z.object({
+ id: import_v43.z.string().nullish(),
+ created: import_v43.z.number().nullish(),
+ model: import_v43.z.string().nullish(),
+ choices: import_v43.z.array(
+ import_v43.z.object({
+ delta: import_v43.z.object({
+ content: import_v43.z.string().nullish(),
+ reasoning: import_v43.z.string().nullish(),
+ tool_calls: import_v43.z.array(
+ import_v43.z.object({
+ index: import_v43.z.number(),
+ id: import_v43.z.string().nullish(),
+ type: import_v43.z.literal("function").optional(),
+ function: import_v43.z.object({
+ name: import_v43.z.string().nullish(),
+ arguments: import_v43.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: import_zod3.z.string().nullable().optional(),
- index: import_zod3.z.number()
+ finish_reason: import_v43.z.string().nullable().optional(),
+ index: import_v43.z.number()
  })
  ),
- x_groq: import_zod3.z.object({
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number().nullish(),
- completion_tokens: import_zod3.z.number().nullish(),
- total_tokens: import_zod3.z.number().nullish()
+ x_groq: import_v43.z.object({
+ usage: import_v43.z.object({
+ prompt_tokens: import_v43.z.number().nullish(),
+ completion_tokens: import_v43.z.number().nullish(),
+ total_tokens: import_v43.z.number().nullish()
  }).nullish()
  }).nullish()
  }),
@@ -631,19 +674,19 @@ var groqChatChunkSchema = import_zod3.z.union([

  // src/groq-transcription-model.ts
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod4 = require("zod");
- var groqProviderOptionsSchema = import_zod4.z.object({
- language: import_zod4.z.string().nullish(),
- prompt: import_zod4.z.string().nullish(),
- responseFormat: import_zod4.z.string().nullish(),
- temperature: import_zod4.z.number().min(0).max(1).nullish(),
- timestampGranularities: import_zod4.z.array(import_zod4.z.string()).nullish()
+ var import_v44 = require("zod/v4");
+ var groqProviderOptionsSchema = import_v44.z.object({
+ language: import_v44.z.string().nullish(),
+ prompt: import_v44.z.string().nullish(),
+ responseFormat: import_v44.z.string().nullish(),
+ temperature: import_v44.z.number().min(0).max(1).nullish(),
+ timestampGranularities: import_v44.z.array(import_v44.z.string()).nullish()
  });
  var GroqTranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
@@ -725,27 +768,27 @@ var GroqTranscriptionModel = class {
  };
  }
  };
- var groqTranscriptionResponseSchema = import_zod4.z.object({
- task: import_zod4.z.string(),
- language: import_zod4.z.string(),
- duration: import_zod4.z.number(),
- text: import_zod4.z.string(),
- segments: import_zod4.z.array(
- import_zod4.z.object({
- id: import_zod4.z.number(),
- seek: import_zod4.z.number(),
- start: import_zod4.z.number(),
- end: import_zod4.z.number(),
- text: import_zod4.z.string(),
- tokens: import_zod4.z.array(import_zod4.z.number()),
- temperature: import_zod4.z.number(),
- avg_logprob: import_zod4.z.number(),
- compression_ratio: import_zod4.z.number(),
- no_speech_prob: import_zod4.z.number()
+ var groqTranscriptionResponseSchema = import_v44.z.object({
+ task: import_v44.z.string(),
+ language: import_v44.z.string(),
+ duration: import_v44.z.number(),
+ text: import_v44.z.string(),
+ segments: import_v44.z.array(
+ import_v44.z.object({
+ id: import_v44.z.number(),
+ seek: import_v44.z.number(),
+ start: import_v44.z.number(),
+ end: import_v44.z.number(),
+ text: import_v44.z.string(),
+ tokens: import_v44.z.array(import_v44.z.number()),
+ temperature: import_v44.z.number(),
+ avg_logprob: import_v44.z.number(),
+ compression_ratio: import_v44.z.number(),
+ no_speech_prob: import_v44.z.number()
  })
  ),
- x_groq: import_zod4.z.object({
- id: import_zod4.z.string()
+ x_groq: import_v44.z.object({
+ id: import_v44.z.string()
  })
  });
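The streaming hunks in `dist/index.js` replace the flat `text`, `reasoning`, and `tool-call-delta` parts with `*-start` / `*-delta` / `*-end` triples keyed by an `id`, plus optional `raw` parts. A sketch of reassembling such a part sequence; the `StreamPart` union below is hand-written from the shapes in this diff rather than imported from `@ai-sdk/provider`:

```ts
// Minimal reassembly of the V2 stream parts emitted above.
type StreamPart =
  | { type: 'text-start' | 'text-end'; id: string }
  | { type: 'text-delta'; id: string; delta: string }
  | { type: 'reasoning-start' | 'reasoning-end'; id: string }
  | { type: 'reasoning-delta'; id: string; delta: string }
  | { type: 'tool-input-start'; id: string; toolName: string }
  | { type: 'tool-input-delta'; id: string; delta: string }
  | { type: 'tool-input-end'; id: string }
  | { type: 'tool-call'; toolCallId: string; toolName: string; input: string }
  | { type: 'raw'; rawValue: unknown };

function collect(parts: StreamPart[]) {
  let text = '';
  let reasoning = '';
  const toolInputs = new Map<string, string>();

  for (const part of parts) {
    switch (part.type) {
      case 'text-delta':
        text += part.delta;
        break;
      case 'reasoning-delta':
        reasoning += part.delta;
        break;
      case 'tool-input-start':
        toolInputs.set(part.id, '');
        break;
      case 'tool-input-delta':
        toolInputs.set(part.id, (toolInputs.get(part.id) ?? '') + part.delta);
        break;
      // 'tool-call' carries the complete JSON arguments once they parse.
    }
  }

  return { text, reasoning, toolInputs };
}
```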