@ai-sdk/groq 2.0.0-canary.1 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -31,12 +31,11 @@ var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
 // src/groq-chat-language-model.ts
 var import_provider3 = require("@ai-sdk/provider");
-var import_provider_utils3 = require("@ai-sdk/provider-utils");
-var import_zod2 = require("zod");
+var import_provider_utils2 = require("@ai-sdk/provider-utils");
+var import_zod3 = require("zod");
 
 // src/convert-to-groq-chat-messages.ts
 var import_provider = require("@ai-sdk/provider");
-var import_provider_utils = require("@ai-sdk/provider-utils");
 function convertToGroqChatMessages(prompt) {
   const messages = [];
   for (const { role, content } of prompt) {
@@ -53,24 +52,24 @@ function convertToGroqChatMessages(prompt) {
         messages.push({
           role: "user",
           content: content.map((part) => {
-            var _a;
             switch (part.type) {
               case "text": {
                 return { type: "text", text: part.text };
               }
-              case "image": {
+              case "file": {
+                if (!part.mediaType.startsWith("image/")) {
+                  throw new import_provider.UnsupportedFunctionalityError({
+                    functionality: "Non-image file content parts"
+                  });
+                }
+                const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                 return {
                   type: "image_url",
                   image_url: {
-                    url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${(0, import_provider_utils.convertUint8ArrayToBase64)(part.image)}`
+                    url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
                   }
                 };
               }
-              case "file": {
-                throw new import_provider.UnsupportedFunctionalityError({
-                  functionality: "File content parts in user messages"
-                });
-              }
             }
           })
         });
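
Note: this hunk replaces the old `image` content parts with the unified `file` parts of the v2 prompt: only `image/*` media types are accepted, the `image/*` wildcard is normalized to `image/jpeg`, and `data` (a base64 string or URL) is forwarded as an `image_url`. A minimal sketch of a prompt fragment that exercises the new branch, with field names taken from this diff (the surrounding prompt type is assumed, not shown here):

const promptFragment = {
  role: "user" as const,
  content: [
    { type: "text" as const, text: "Describe this picture." },
    {
      type: "file" as const,
      // non-image media types now throw UnsupportedFunctionalityError
      mediaType: "image/png",
      // base64 string or URL; a string becomes data:image/png;base64,...
      data: "iVBORw0KGgoAAAANSUhEUg...",
    },
  ],
};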
@@ -137,16 +136,31 @@ function getResponseMetadata({
   };
 }
 
-// src/groq-error.ts
+// src/groq-chat-options.ts
 var import_zod = require("zod");
-var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var groqErrorDataSchema = import_zod.z.object({
-  error: import_zod.z.object({
-    message: import_zod.z.string(),
-    type: import_zod.z.string()
+var groqProviderOptions = import_zod.z.object({
+  reasoningFormat: import_zod.z.enum(["parsed", "raw", "hidden"]).nullish(),
+  /**
+   * Whether to enable parallel function calling during tool use. Default to true.
+   */
+  parallelToolCalls: import_zod.z.boolean().nullish(),
+  /**
+   * A unique identifier representing your end-user, which can help OpenAI to
+   * monitor and detect abuse. Learn more.
+   */
+  user: import_zod.z.string().nullish()
+});
+
+// src/groq-error.ts
+var import_zod2 = require("zod");
+var import_provider_utils = require("@ai-sdk/provider-utils");
+var groqErrorDataSchema = import_zod2.z.object({
+  error: import_zod2.z.object({
+    message: import_zod2.z.string(),
+    type: import_zod2.z.string()
   })
 });
-var groqFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
+var groqFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
   errorSchema: groqErrorDataSchema,
   errorToMessage: (data) => data.error.message
 });
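
Note: the per-model settings (`user`, `parallelToolCalls`) and `reasoningFormat` now live together in the `groqProviderOptions` schema and are read from `providerOptions` under the `groq` key rather than from constructor settings. A hedged usage sketch, assuming the AI SDK `generateText` call shape, which is not part of this diff:

import { createGroq } from "@ai-sdk/groq";
import { generateText } from "ai";

const groq = createGroq({ apiKey: process.env.GROQ_API_KEY });

const { text } = await generateText({
  model: groq("deepseek-r1-distill-llama-70b"), // model id is illustrative only
  prompt: "Explain the change in one sentence.",
  providerOptions: {
    groq: {
      reasoningFormat: "parsed", // "parsed" | "raw" | "hidden"
      parallelToolCalls: true,   // defaults to true
      user: "user-1234",         // forwarded as the `user` request field
    },
  },
});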
@@ -154,15 +168,14 @@ var groqFailedResponseHandler = (0, import_provider_utils2.createJsonErrorRespon
 // src/groq-prepare-tools.ts
 var import_provider2 = require("@ai-sdk/provider");
 function prepareTools({
-  mode
+  tools,
+  toolChoice
 }) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings };
+    return { tools: void 0, toolChoice: void 0, toolWarnings };
   }
-  const toolChoice = mode.toolChoice;
   const groqTools = [];
   for (const tool of tools) {
     if (tool.type === "provider-defined") {
@@ -179,18 +192,18 @@ function prepareTools({
     }
   }
   if (toolChoice == null) {
-    return { tools: groqTools, tool_choice: void 0, toolWarnings };
+    return { tools: groqTools, toolChoice: void 0, toolWarnings };
   }
   const type = toolChoice.type;
   switch (type) {
     case "auto":
     case "none":
     case "required":
-      return { tools: groqTools, tool_choice: type, toolWarnings };
+      return { tools: groqTools, toolChoice: type, toolWarnings };
     case "tool":
       return {
         tools: groqTools,
-        tool_choice: {
+        toolChoice: {
           type: "function",
           function: {
             name: toolChoice.toolName
@@ -201,7 +214,7 @@ function prepareTools({
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider2.UnsupportedFunctionalityError({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
@@ -226,24 +239,22 @@ function mapGroqFinishReason(finishReason) {
 
 // src/groq-chat-language-model.ts
 var GroqChatLanguageModel = class {
-  constructor(modelId, settings, config) {
+  constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.supportsStructuredOutputs = false;
-    this.defaultObjectGenerationMode = "json";
     this.modelId = modelId;
-    this.settings = settings;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  get supportsImageUrls() {
-    return !this.settings.downloadImages;
+  async getSupportedUrls() {
+    return {
+      "image/*": [/^https:\/\/.*$/]
+    };
   }
   getArgs({
-    mode,
     prompt,
-    maxTokens,
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -253,9 +264,10 @@ var GroqChatLanguageModel = class {
     responseFormat,
     seed,
     stream,
-    providerMetadata
+    tools,
+    toolChoice,
+    providerOptions
   }) {
-    const type = mode.type;
     const warnings = [];
     if (topK != null) {
       warnings.push({
@@ -270,133 +282,106 @@ var GroqChatLanguageModel = class {
         details: "JSON response format schema is not supported"
       });
     }
-    const groqOptions = (0, import_provider_utils3.parseProviderOptions)({
+    const groqOptions = (0, import_provider_utils2.parseProviderOptions)({
       provider: "groq",
-      providerOptions: providerMetadata,
-      schema: import_zod2.z.object({
-        reasoningFormat: import_zod2.z.enum(["parsed", "raw", "hidden"]).nullish()
-      })
+      providerOptions,
+      schema: groqProviderOptions
     });
-    const baseArgs = {
-      // model id:
-      model: this.modelId,
-      // model specific settings:
-      user: this.settings.user,
-      parallel_tool_calls: this.settings.parallelToolCalls,
-      // standardized settings:
-      max_tokens: maxTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
-      stop: stopSequences,
-      seed,
-      // response format:
-      response_format: (
-        // json object response format is not supported for streaming:
-        stream === false && (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0
-      ),
-      // provider options:
-      reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
-      // messages:
-      messages: convertToGroqChatMessages(prompt)
+    const {
+      tools: groqTools,
+      toolChoice: groqToolChoice,
+      toolWarnings
+    } = prepareTools({ tools, toolChoice });
+    return {
+      args: {
+        // model id:
+        model: this.modelId,
+        // model specific settings:
+        user: groqOptions == null ? void 0 : groqOptions.user,
+        parallel_tool_calls: groqOptions == null ? void 0 : groqOptions.parallelToolCalls,
+        // standardized settings:
+        max_tokens: maxOutputTokens,
+        temperature,
+        top_p: topP,
+        frequency_penalty: frequencyPenalty,
+        presence_penalty: presencePenalty,
+        stop: stopSequences,
+        seed,
+        // response format:
+        response_format: (
+          // json object response format is not supported for streaming:
+          stream === false && (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0
+        ),
+        // provider options:
+        reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
+        // messages:
+        messages: convertToGroqChatMessages(prompt),
+        // tools:
+        tools: groqTools,
+        tool_choice: groqToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings]
     };
-    switch (type) {
-      case "regular": {
-        const { tools, tool_choice, toolWarnings } = prepareTools({ mode });
-        return {
-          args: {
-            ...baseArgs,
-            tools,
-            tool_choice
-          },
-          warnings: [...warnings, ...toolWarnings]
-        };
-      }
-      case "object-json": {
-        return {
-          args: {
-            ...baseArgs,
-            response_format: (
-              // json object response format is not supported for streaming:
-              stream === false ? { type: "json_object" } : void 0
-            )
-          },
-          warnings
-        };
-      }
-      case "object-tool": {
-        return {
-          args: {
-            ...baseArgs,
-            tool_choice: {
-              type: "function",
-              function: { name: mode.tool.name }
-            },
-            tools: [
-              {
-                type: "function",
-                function: {
-                  name: mode.tool.name,
-                  description: mode.tool.description,
-                  parameters: mode.tool.parameters
-                }
-              }
-            ]
-          },
-          warnings
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
-    }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e;
     const { args, warnings } = this.getArgs({ ...options, stream: false });
     const body = JSON.stringify(args);
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
-    } = await (0, import_provider_utils3.postJsonToApi)({
+    } = await (0, import_provider_utils2.postJsonToApi)({
       url: this.config.url({
         path: "/chat/completions",
         modelId: this.modelId
       }),
-      headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+      headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
       body: args,
       failedResponseHandler: groqFailedResponseHandler,
-      successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+      successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
         groqChatResponseSchema
       ),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
-    return {
-      text: (_a = choice.message.content) != null ? _a : void 0,
-      reasoning: (_b = choice.message.reasoning) != null ? _b : void 0,
-      toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
-        var _a2;
-        return {
+    const content = [];
+    const text = choice.message.content;
+    if (text != null && text.length > 0) {
+      content.push({ type: "text", text });
+    }
+    const reasoning = choice.message.reasoning;
+    if (reasoning != null && reasoning.length > 0) {
+      content.push({
+        type: "reasoning",
+        reasoningType: "text",
+        text: reasoning
+      });
+    }
+    if (choice.message.tool_calls != null) {
+      for (const toolCall of choice.message.tool_calls) {
+        content.push({
+          type: "tool-call",
           toolCallType: "function",
-          toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils3.generateId)(),
+          toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils2.generateId)(),
           toolName: toolCall.function.name,
           args: toolCall.function.arguments
-        };
-      }),
+        });
+      }
+    }
+    return {
+      content,
       finishReason: mapGroqFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : NaN,
-        completionTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : NaN
+        inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
+        outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0
+      },
+      response: {
+        ...getResponseMetadata(response),
+        headers: responseHeaders,
+        body: rawResponse
       },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders, body: rawResponse },
-      response: getResponseMetadata(response),
       warnings,
       request: { body }
     };
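
Note: `doGenerate` no longer returns flat `text`/`reasoning`/`toolCalls` fields; it assembles an ordered `content` array of typed parts and reports usage as `inputTokens`/`outputTokens` instead of `promptTokens`/`completionTokens`. A rough sketch of the result shape produced by this hunk (types are approximations for illustration, not imported from @ai-sdk/provider):

type GroqContentPart =
  | { type: "text"; text: string }
  | { type: "reasoning"; reasoningType: "text"; text: string }
  | {
      type: "tool-call";
      toolCallType: "function";
      toolCallId: string;
      toolName: string;
      args: string; // JSON-encoded tool arguments
    };

interface GroqGenerateResultSketch {
  content: GroqContentPart[];
  finishReason: string;
  usage: { inputTokens: number | undefined; outputTokens: number | undefined };
  // response metadata, warnings, and request body omitted for brevity
}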
@@ -404,33 +389,35 @@ var GroqChatLanguageModel = class {
   async doStream(options) {
     const { args, warnings } = this.getArgs({ ...options, stream: true });
     const body = JSON.stringify({ ...args, stream: true });
-    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
+    const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
       url: this.config.url({
         path: "/chat/completions",
         modelId: this.modelId
       }),
-      headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+      headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
       body: {
         ...args,
         stream: true
       },
       failedResponseHandler: groqFailedResponseHandler,
-      successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(groqChatChunkSchema),
+      successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(groqChatChunkSchema),
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     const toolCalls = [];
     let finishReason = "unknown";
-    let usage = {
-      promptTokens: void 0,
-      completionTokens: void 0
+    const usage = {
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     let isFirstChunk = true;
     let providerMetadata;
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
             if (!chunk.success) {
@@ -452,10 +439,8 @@ var GroqChatLanguageModel = class {
               });
             }
             if (((_a = value.x_groq) == null ? void 0 : _a.usage) != null) {
-              usage = {
-                promptTokens: (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0,
-                completionTokens: (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0
-              };
+              usage.inputTokens = (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0;
+              usage.outputTokens = (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -468,13 +453,14 @@ var GroqChatLanguageModel = class {
             if (delta.reasoning != null && delta.reasoning.length > 0) {
               controller.enqueue({
                 type: "reasoning",
-                textDelta: delta.reasoning
+                reasoningType: "text",
+                text: delta.reasoning
               });
             }
             if (delta.content != null && delta.content.length > 0) {
               controller.enqueue({
-                type: "text-delta",
-                textDelta: delta.content
+                type: "text",
+                text: delta.content
               });
             }
             if (delta.tool_calls != null) {
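
Note: the streaming part names change in step with the generate path: `text-delta`/`textDelta` becomes a `text` part carrying a `text` field, and reasoning deltas now include `reasoningType: "text"`. A minimal consumer sketch keyed on the part types enqueued in this stream (the surrounding stream plumbing is assumed, not shown in the diff):

function handleStreamPart(part: { type: string; [key: string]: unknown }) {
  switch (part.type) {
    case "stream-start":
      // emitted once at the start; carries the request warnings
      break;
    case "text":
      process.stdout.write(part.text as string);
      break;
    case "reasoning":
      // part.reasoningType === "text"; part.text holds the reasoning delta
      break;
    case "tool-call":
      // toolCallId, toolName, args (JSON string)
      break;
  }
}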
@@ -519,11 +505,11 @@ var GroqChatLanguageModel = class {
                     argsTextDelta: toolCall2.function.arguments
                   });
                 }
-                if ((0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+                if ((0, import_provider_utils2.isParsableJson)(toolCall2.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_h = toolCall2.id) != null ? _h : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_h = toolCall2.id) != null ? _h : (0, import_provider_utils2.generateId)(),
                     toolName: toolCall2.function.name,
                     args: toolCall2.function.arguments
                   });
@@ -546,11 +532,11 @@ var GroqChatLanguageModel = class {
                 toolName: toolCall.function.name,
                 argsTextDelta: (_l = toolCallDelta.function.arguments) != null ? _l : ""
               });
-              if (((_m = toolCall.function) == null ? void 0 : _m.name) != null && ((_n = toolCall.function) == null ? void 0 : _n.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+              if (((_m = toolCall.function) == null ? void 0 : _m.name) != null && ((_n = toolCall.function) == null ? void 0 : _n.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
                 controller.enqueue({
                   type: "tool-call",
                   toolCallType: "function",
-                  toolCallId: (_o = toolCall.id) != null ? _o : (0, import_provider_utils3.generateId)(),
+                  toolCallId: (_o = toolCall.id) != null ? _o : (0, import_provider_utils2.generateId)(),
                   toolName: toolCall.function.name,
                   args: toolCall.function.arguments
                 });
@@ -560,91 +546,205 @@ var GroqChatLanguageModel = class {
             }
           },
           flush(controller) {
-            var _a, _b;
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage: {
-                promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
-                completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-              },
+              usage,
               ...providerMetadata != null ? { providerMetadata } : {}
             });
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
-var groqChatResponseSchema = import_zod2.z.object({
-  id: import_zod2.z.string().nullish(),
-  created: import_zod2.z.number().nullish(),
-  model: import_zod2.z.string().nullish(),
-  choices: import_zod2.z.array(
-    import_zod2.z.object({
-      message: import_zod2.z.object({
-        content: import_zod2.z.string().nullish(),
-        reasoning: import_zod2.z.string().nullish(),
-        tool_calls: import_zod2.z.array(
-          import_zod2.z.object({
-            id: import_zod2.z.string().nullish(),
-            type: import_zod2.z.literal("function"),
-            function: import_zod2.z.object({
-              name: import_zod2.z.string(),
-              arguments: import_zod2.z.string()
+var groqChatResponseSchema = import_zod3.z.object({
+  id: import_zod3.z.string().nullish(),
+  created: import_zod3.z.number().nullish(),
+  model: import_zod3.z.string().nullish(),
+  choices: import_zod3.z.array(
+    import_zod3.z.object({
+      message: import_zod3.z.object({
+        content: import_zod3.z.string().nullish(),
+        reasoning: import_zod3.z.string().nullish(),
+        tool_calls: import_zod3.z.array(
+          import_zod3.z.object({
+            id: import_zod3.z.string().nullish(),
+            type: import_zod3.z.literal("function"),
+            function: import_zod3.z.object({
+              name: import_zod3.z.string(),
+              arguments: import_zod3.z.string()
             })
           })
         ).nullish()
       }),
-      index: import_zod2.z.number(),
-      finish_reason: import_zod2.z.string().nullish()
+      index: import_zod3.z.number(),
+      finish_reason: import_zod3.z.string().nullish()
     })
   ),
-  usage: import_zod2.z.object({
-    prompt_tokens: import_zod2.z.number().nullish(),
-    completion_tokens: import_zod2.z.number().nullish()
+  usage: import_zod3.z.object({
+    prompt_tokens: import_zod3.z.number().nullish(),
+    completion_tokens: import_zod3.z.number().nullish()
   }).nullish()
 });
-var groqChatChunkSchema = import_zod2.z.union([
-  import_zod2.z.object({
-    id: import_zod2.z.string().nullish(),
-    created: import_zod2.z.number().nullish(),
-    model: import_zod2.z.string().nullish(),
-    choices: import_zod2.z.array(
-      import_zod2.z.object({
-        delta: import_zod2.z.object({
-          content: import_zod2.z.string().nullish(),
-          reasoning: import_zod2.z.string().nullish(),
-          tool_calls: import_zod2.z.array(
-            import_zod2.z.object({
-              index: import_zod2.z.number(),
-              id: import_zod2.z.string().nullish(),
-              type: import_zod2.z.literal("function").optional(),
-              function: import_zod2.z.object({
-                name: import_zod2.z.string().nullish(),
-                arguments: import_zod2.z.string().nullish()
+var groqChatChunkSchema = import_zod3.z.union([
+  import_zod3.z.object({
+    id: import_zod3.z.string().nullish(),
+    created: import_zod3.z.number().nullish(),
+    model: import_zod3.z.string().nullish(),
+    choices: import_zod3.z.array(
+      import_zod3.z.object({
+        delta: import_zod3.z.object({
+          content: import_zod3.z.string().nullish(),
+          reasoning: import_zod3.z.string().nullish(),
+          tool_calls: import_zod3.z.array(
+            import_zod3.z.object({
+              index: import_zod3.z.number(),
+              id: import_zod3.z.string().nullish(),
+              type: import_zod3.z.literal("function").optional(),
+              function: import_zod3.z.object({
+                name: import_zod3.z.string().nullish(),
+                arguments: import_zod3.z.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        finish_reason: import_zod2.z.string().nullable().optional(),
-        index: import_zod2.z.number()
+        finish_reason: import_zod3.z.string().nullable().optional(),
+        index: import_zod3.z.number()
       })
     ),
-    x_groq: import_zod2.z.object({
-      usage: import_zod2.z.object({
-        prompt_tokens: import_zod2.z.number().nullish(),
-        completion_tokens: import_zod2.z.number().nullish()
+    x_groq: import_zod3.z.object({
+      usage: import_zod3.z.object({
+        prompt_tokens: import_zod3.z.number().nullish(),
+        completion_tokens: import_zod3.z.number().nullish()
       }).nullish()
     }).nullish()
   }),
   groqErrorDataSchema
 ]);
 
+// src/groq-transcription-model.ts
+var import_provider_utils3 = require("@ai-sdk/provider-utils");
+var import_zod4 = require("zod");
+var groqProviderOptionsSchema = import_zod4.z.object({
+  language: import_zod4.z.string().nullish(),
+  prompt: import_zod4.z.string().nullish(),
+  responseFormat: import_zod4.z.string().nullish(),
+  temperature: import_zod4.z.number().min(0).max(1).nullish(),
+  timestampGranularities: import_zod4.z.array(import_zod4.z.string()).nullish()
+});
+var GroqTranscriptionModel = class {
+  constructor(modelId, config) {
+    this.modelId = modelId;
+    this.config = config;
+    this.specificationVersion = "v1";
+  }
+  get provider() {
+    return this.config.provider;
+  }
+  getArgs({
+    audio,
+    mediaType,
+    providerOptions
+  }) {
+    var _a, _b, _c, _d, _e;
+    const warnings = [];
+    const groqOptions = (0, import_provider_utils3.parseProviderOptions)({
+      provider: "groq",
+      providerOptions,
+      schema: groqProviderOptionsSchema
+    });
+    const formData = new FormData();
+    const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([(0, import_provider_utils3.convertBase64ToUint8Array)(audio)]);
+    formData.append("model", this.modelId);
+    formData.append("file", new File([blob], "audio", { type: mediaType }));
+    if (groqOptions) {
+      const transcriptionModelOptions = {
+        language: (_a = groqOptions.language) != null ? _a : void 0,
+        prompt: (_b = groqOptions.prompt) != null ? _b : void 0,
+        response_format: (_c = groqOptions.responseFormat) != null ? _c : void 0,
+        temperature: (_d = groqOptions.temperature) != null ? _d : void 0,
+        timestamp_granularities: (_e = groqOptions.timestampGranularities) != null ? _e : void 0
+      };
+      for (const key in transcriptionModelOptions) {
+        const value = transcriptionModelOptions[key];
+        if (value !== void 0) {
+          formData.append(key, String(value));
+        }
+      }
+    }
+    return {
+      formData,
+      warnings
+    };
+  }
+  async doGenerate(options) {
+    var _a, _b, _c, _d, _e;
+    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+    const { formData, warnings } = this.getArgs(options);
+    const {
+      value: response,
+      responseHeaders,
+      rawValue: rawResponse
+    } = await (0, import_provider_utils3.postFormDataToApi)({
+      url: this.config.url({
+        path: "/audio/transcriptions",
+        modelId: this.modelId
+      }),
+      headers: (0, import_provider_utils3.combineHeaders)(this.config.headers(), options.headers),
+      formData,
+      failedResponseHandler: groqFailedResponseHandler,
+      successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
+        groqTranscriptionResponseSchema
+      ),
+      abortSignal: options.abortSignal,
+      fetch: this.config.fetch
+    });
+    return {
+      text: response.text,
+      segments: (_e = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+        text: segment.text,
+        startSecond: segment.start,
+        endSecond: segment.end
+      }))) != null ? _e : [],
+      language: response.language,
+      durationInSeconds: response.duration,
+      warnings,
+      response: {
+        timestamp: currentDate,
+        modelId: this.modelId,
+        headers: responseHeaders,
+        body: rawResponse
+      }
+    };
+  }
+};
+var groqTranscriptionResponseSchema = import_zod4.z.object({
+  task: import_zod4.z.string(),
+  language: import_zod4.z.string(),
+  duration: import_zod4.z.number(),
+  text: import_zod4.z.string(),
+  segments: import_zod4.z.array(
+    import_zod4.z.object({
+      id: import_zod4.z.number(),
+      seek: import_zod4.z.number(),
+      start: import_zod4.z.number(),
+      end: import_zod4.z.number(),
+      text: import_zod4.z.string(),
+      tokens: import_zod4.z.array(import_zod4.z.number()),
+      temperature: import_zod4.z.number(),
+      avg_logprob: import_zod4.z.number(),
+      compression_ratio: import_zod4.z.number(),
+      no_speech_prob: import_zod4.z.number()
+    })
+  ),
+  x_groq: import_zod4.z.object({
+    id: import_zod4.z.string()
+  })
+});
+
 // src/groq-provider.ts
 function createGroq(options = {}) {
   var _a;
@@ -657,22 +757,30 @@ function createGroq(options = {}) {
     })}`,
     ...options.headers
   });
-  const createChatModel = (modelId, settings = {}) => new GroqChatLanguageModel(modelId, settings, {
+  const createChatModel = (modelId) => new GroqChatLanguageModel(modelId, {
     provider: "groq.chat",
     url: ({ path }) => `${baseURL}${path}`,
     headers: getHeaders,
     fetch: options.fetch
   });
-  const createLanguageModel = (modelId, settings) => {
+  const createLanguageModel = (modelId) => {
     if (new.target) {
       throw new Error(
         "The Groq model function cannot be called with the new keyword."
       );
     }
-    return createChatModel(modelId, settings);
+    return createChatModel(modelId);
+  };
+  const createTranscriptionModel = (modelId) => {
+    return new GroqTranscriptionModel(modelId, {
+      provider: "groq.transcription",
+      url: ({ path }) => `${baseURL}${path}`,
+      headers: getHeaders,
+      fetch: options.fetch
+    });
   };
-  const provider = function(modelId, settings) {
-    return createLanguageModel(modelId, settings);
+  const provider = function(modelId) {
+    return createLanguageModel(modelId);
   };
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
@@ -682,6 +790,7 @@ function createGroq(options = {}) {
   provider.imageModel = (modelId) => {
     throw new import_provider4.NoSuchModelError({ modelId, modelType: "imageModel" });
   };
+  provider.transcription = createTranscriptionModel;
   return provider;
 }
 var groq = createGroq();
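
Note: the provider factory no longer takes a `settings` argument, and the new `GroqTranscriptionModel` is exposed as `provider.transcription`, posting multipart form data to /audio/transcriptions. A hedged end-to-end sketch, assuming the AI SDK's experimental `transcribe` helper and a Whisper model id, neither of which is defined in this diff:

import { createGroq } from "@ai-sdk/groq";
import { experimental_transcribe as transcribe } from "ai"; // assumed helper, not part of this package
import { readFile } from "node:fs/promises";

const groq = createGroq({ apiKey: process.env.GROQ_API_KEY });

const { text, segments, durationInSeconds } = await transcribe({
  model: groq.transcription("whisper-large-v3"), // model id is illustrative
  audio: await readFile("meeting.wav"),
  providerOptions: {
    groq: {
      language: "en",
      responseFormat: "verbose_json",
      timestampGranularities: ["segment"],
    },
  },
});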