@ai-sdk/groq 2.0.0-canary.1 → 2.0.0-canary.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
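At a glance, the diff below moves chat configuration from constructor `settings` to `providerOptions` parsed against a new `groqProviderOptions` schema (`reasoningFormat`, `parallelToolCalls`, `user`), replaces `image` content parts with `file` parts carrying a `mediaType`, reworks `doGenerate`/`doStream` output into the v2 content/usage shape, and registers a new `GroqTranscriptionModel` via `provider.transcription`. As a minimal usage sketch (not part of the diff): the `generateText` call from the `ai` package and the model id are assumptions for illustration; only the Groq provider changes themselves are shown in the diff.

```ts
// Sketch: chat options that used to be constructor settings now travel via
// providerOptions.groq, matching the groqProviderOptions schema added below.
import { generateText } from "ai"; // assumed host API, not part of this package
import { createGroq } from "@ai-sdk/groq";

const groq = createGroq({ apiKey: process.env.GROQ_API_KEY });

const { text } = await generateText({
  model: groq("llama-3.3-70b-versatile"), // hypothetical model id
  prompt: "Explain tool calling in one sentence.",
  providerOptions: {
    groq: {
      // fields from the new groqProviderOptions schema
      reasoningFormat: "parsed",
      parallelToolCalls: true,
      user: "user-1234",
    },
  },
});
```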
package/dist/index.mjs CHANGED
@@ -20,13 +20,12 @@ import {
  parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z2 } from "zod";
+ import { z as z3 } from "zod";

  // src/convert-to-groq-chat-messages.ts
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
  function convertToGroqChatMessages(prompt) {
  const messages = [];
  for (const { role, content } of prompt) {
@@ -43,24 +42,24 @@ function convertToGroqChatMessages(prompt) {
  messages.push({
  role: "user",
  content: content.map((part) => {
- var _a;
  switch (part.type) {
  case "text": {
  return { type: "text", text: part.text };
  }
- case "image": {
+ case "file": {
+ if (!part.mediaType.startsWith("image/")) {
+ throw new UnsupportedFunctionalityError({
+ functionality: "Non-image file content parts"
+ });
+ }
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
  return {
  type: "image_url",
  image_url: {
- url: part.image instanceof URL ? part.image.toString() : `data:${(_a = part.mimeType) != null ? _a : "image/jpeg"};base64,${convertUint8ArrayToBase64(part.image)}`
+ url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
  }
  };
  }
- case "file": {
- throw new UnsupportedFunctionalityError({
- functionality: "File content parts in user messages"
- });
- }
  }
  })
  });
@@ -127,13 +126,28 @@ function getResponseMetadata({
  };
  }

- // src/groq-error.ts
+ // src/groq-chat-options.ts
  import { z } from "zod";
+ var groqProviderOptions = z.object({
+ reasoningFormat: z.enum(["parsed", "raw", "hidden"]).nullish(),
+ /**
+ * Whether to enable parallel function calling during tool use. Default to true.
+ */
+ parallelToolCalls: z.boolean().nullish(),
+ /**
+ * A unique identifier representing your end-user, which can help OpenAI to
+ * monitor and detect abuse. Learn more.
+ */
+ user: z.string().nullish()
+ });
+
+ // src/groq-error.ts
+ import { z as z2 } from "zod";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
- var groqErrorDataSchema = z.object({
- error: z.object({
- message: z.string(),
- type: z.string()
+ var groqErrorDataSchema = z2.object({
+ error: z2.object({
+ message: z2.string(),
+ type: z2.string()
  })
  });
  var groqFailedResponseHandler = createJsonErrorResponseHandler({
@@ -146,15 +160,14 @@ import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
  function prepareTools({
- mode
+ tools,
+ toolChoice
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
  const groqTools = [];
  for (const tool of tools) {
  if (tool.type === "provider-defined") {
@@ -171,18 +184,18 @@ function prepareTools({
  }
  }
  if (toolChoice == null) {
- return { tools: groqTools, tool_choice: void 0, toolWarnings };
+ return { tools: groqTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return { tools: groqTools, tool_choice: type, toolWarnings };
+ return { tools: groqTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
  tools: groqTools,
- tool_choice: {
+ toolChoice: {
  type: "function",
  function: {
  name: toolChoice.toolName
@@ -193,7 +206,7 @@ function prepareTools({
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError2({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
@@ -218,24 +231,22 @@ function mapGroqFinishReason(finishReason) {

  // src/groq-chat-language-model.ts
  var GroqChatLanguageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.specificationVersion = "v2";
- this.supportsStructuredOutputs = false;
- this.defaultObjectGenerationMode = "json";
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
  }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return !this.settings.downloadImages;
+ async getSupportedUrls() {
+ return {
+ "image/*": [/^https:\/\/.*$/]
+ };
  }
  getArgs({
- mode,
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
@@ -245,9 +256,10 @@ var GroqChatLanguageModel = class {
  responseFormat,
  seed,
  stream,
- providerMetadata
+ tools,
+ toolChoice,
+ providerOptions
  }) {
- const type = mode.type;
  const warnings = [];
  if (topK != null) {
  warnings.push({
@@ -264,89 +276,47 @@ var GroqChatLanguageModel = class {
  }
  }
  const groqOptions = parseProviderOptions({
  provider: "groq",
- providerOptions: providerMetadata,
- schema: z2.object({
- reasoningFormat: z2.enum(["parsed", "raw", "hidden"]).nullish()
- })
+ providerOptions,
+ schema: groqProviderOptions
  });
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- stop: stopSequences,
- seed,
- // response format:
- response_format: (
- // json object response format is not supported for streaming:
- stream === false && (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0
- ),
- // provider options:
- reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
- // messages:
- messages: convertToGroqChatMessages(prompt)
+ const {
+ tools: groqTools,
+ toolChoice: groqToolChoice,
+ toolWarnings
+ } = prepareTools({ tools, toolChoice });
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ user: groqOptions == null ? void 0 : groqOptions.user,
+ parallel_tool_calls: groqOptions == null ? void 0 : groqOptions.parallelToolCalls,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ stop: stopSequences,
+ seed,
+ // response format:
+ response_format: (
+ // json object response format is not supported for streaming:
+ stream === false && (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0
+ ),
+ // provider options:
+ reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
+ // messages:
+ messages: convertToGroqChatMessages(prompt),
+ // tools:
+ tools: groqTools,
+ tool_choice: groqToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
  };
- switch (type) {
- case "regular": {
- const { tools, tool_choice, toolWarnings } = prepareTools({ mode });
- return {
- args: {
- ...baseArgs,
- tools,
- tool_choice
- },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: (
- // json object response format is not supported for streaming:
- stream === false ? { type: "json_object" } : void 0
- )
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e;
  const { args, warnings } = this.getArgs({ ...options, stream: false });
  const body = JSON.stringify(args);
@@ -367,28 +337,43 @@ var GroqChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const choice = response.choices[0];
- return {
- text: (_a = choice.message.content) != null ? _a : void 0,
- reasoning: (_b = choice.message.reasoning) != null ? _b : void 0,
- toolCalls: (_c = choice.message.tool_calls) == null ? void 0 : _c.map((toolCall) => {
- var _a2;
- return {
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ const reasoning = choice.message.reasoning;
+ if (reasoning != null && reasoning.length > 0) {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: reasoning
+ });
+ }
+ if (choice.message.tool_calls != null) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : generateId(),
+ toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
- };
- }),
+ });
+ }
+ }
+ return {
+ content,
  finishReason: mapGroqFinishReason(choice.finish_reason),
  usage: {
- promptTokens: (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens) != null ? _e : NaN,
- completionTokens: (_g = (_f = response.usage) == null ? void 0 : _f.completion_tokens) != null ? _g : NaN
+ inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
+ outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0
+ },
+ response: {
+ ...getResponseMetadata(response),
+ headers: responseHeaders,
+ body: rawResponse
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders, body: rawResponse },
- response: getResponseMetadata(response),
  warnings,
  request: { body }
  };
@@ -411,18 +396,20 @@ var GroqChatLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
- promptTokens: void 0,
- completionTokens: void 0
+ const usage = {
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  let isFirstChunk = true;
  let providerMetadata;
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
  if (!chunk.success) {
@@ -444,10 +431,8 @@ var GroqChatLanguageModel = class {
  });
  }
  if (((_a = value.x_groq) == null ? void 0 : _a.usage) != null) {
- usage = {
- promptTokens: (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0,
- completionTokens: (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0
- };
+ usage.inputTokens = (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0;
+ usage.outputTokens = (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -460,13 +445,14 @@ var GroqChatLanguageModel = class {
  if (delta.reasoning != null && delta.reasoning.length > 0) {
  controller.enqueue({
  type: "reasoning",
- textDelta: delta.reasoning
+ reasoningType: "text",
+ text: delta.reasoning
  });
  }
  if (delta.content != null && delta.content.length > 0) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  if (delta.tool_calls != null) {
@@ -552,91 +538,211 @@ var GroqChatLanguageModel = class {
  }
  },
  flush(controller) {
- var _a, _b;
  controller.enqueue({
  type: "finish",
  finishReason,
- usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
- },
+ usage,
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
- var groqChatResponseSchema = z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- message: z2.object({
- content: z2.string().nullish(),
- reasoning: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- id: z2.string().nullish(),
- type: z2.literal("function"),
- function: z2.object({
- name: z2.string(),
- arguments: z2.string()
+ var groqChatResponseSchema = z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ message: z3.object({
+ content: z3.string().nullish(),
+ reasoning: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ id: z3.string().nullish(),
+ type: z3.literal("function"),
+ function: z3.object({
+ name: z3.string(),
+ arguments: z3.string()
  })
  })
  ).nullish()
  }),
- index: z2.number(),
- finish_reason: z2.string().nullish()
+ index: z3.number(),
+ finish_reason: z3.string().nullish()
  })
  ),
- usage: z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish()
+ usage: z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish()
  }).nullish()
  });
- var groqChatChunkSchema = z2.union([
- z2.object({
- id: z2.string().nullish(),
- created: z2.number().nullish(),
- model: z2.string().nullish(),
- choices: z2.array(
- z2.object({
- delta: z2.object({
- content: z2.string().nullish(),
- reasoning: z2.string().nullish(),
- tool_calls: z2.array(
- z2.object({
- index: z2.number(),
- id: z2.string().nullish(),
- type: z2.literal("function").optional(),
- function: z2.object({
- name: z2.string().nullish(),
- arguments: z2.string().nullish()
+ var groqChatChunkSchema = z3.union([
+ z3.object({
+ id: z3.string().nullish(),
+ created: z3.number().nullish(),
+ model: z3.string().nullish(),
+ choices: z3.array(
+ z3.object({
+ delta: z3.object({
+ content: z3.string().nullish(),
+ reasoning: z3.string().nullish(),
+ tool_calls: z3.array(
+ z3.object({
+ index: z3.number(),
+ id: z3.string().nullish(),
+ type: z3.literal("function").optional(),
+ function: z3.object({
+ name: z3.string().nullish(),
+ arguments: z3.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- finish_reason: z2.string().nullable().optional(),
- index: z2.number()
+ finish_reason: z3.string().nullable().optional(),
+ index: z3.number()
  })
  ),
- x_groq: z2.object({
- usage: z2.object({
- prompt_tokens: z2.number().nullish(),
- completion_tokens: z2.number().nullish()
+ x_groq: z3.object({
+ usage: z3.object({
+ prompt_tokens: z3.number().nullish(),
+ completion_tokens: z3.number().nullish()
  }).nullish()
  }).nullish()
  }),
  groqErrorDataSchema
  ]);

+ // src/groq-transcription-model.ts
+ import {
+ combineHeaders as combineHeaders2,
+ convertBase64ToUint8Array,
+ createJsonResponseHandler as createJsonResponseHandler2,
+ parseProviderOptions as parseProviderOptions2,
+ postFormDataToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod";
+ var groqProviderOptionsSchema = z4.object({
+ language: z4.string().nullish(),
+ prompt: z4.string().nullish(),
+ responseFormat: z4.string().nullish(),
+ temperature: z4.number().min(0).max(1).nullish(),
+ timestampGranularities: z4.array(z4.string()).nullish()
+ });
+ var GroqTranscriptionModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v1";
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ getArgs({
+ audio,
+ mediaType,
+ providerOptions
+ }) {
+ var _a, _b, _c, _d, _e;
+ const warnings = [];
+ const groqOptions = parseProviderOptions2({
+ provider: "groq",
+ providerOptions,
+ schema: groqProviderOptionsSchema
+ });
+ const formData = new FormData();
+ const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+ formData.append("model", this.modelId);
+ formData.append("file", new File([blob], "audio", { type: mediaType }));
+ if (groqOptions) {
+ const transcriptionModelOptions = {
+ language: (_a = groqOptions.language) != null ? _a : void 0,
+ prompt: (_b = groqOptions.prompt) != null ? _b : void 0,
+ response_format: (_c = groqOptions.responseFormat) != null ? _c : void 0,
+ temperature: (_d = groqOptions.temperature) != null ? _d : void 0,
+ timestamp_granularities: (_e = groqOptions.timestampGranularities) != null ? _e : void 0
+ };
+ for (const key in transcriptionModelOptions) {
+ const value = transcriptionModelOptions[key];
+ if (value !== void 0) {
+ formData.append(key, String(value));
+ }
+ }
+ }
+ return {
+ formData,
+ warnings
+ };
+ }
+ async doGenerate(options) {
+ var _a, _b, _c, _d, _e;
+ const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+ const { formData, warnings } = this.getArgs(options);
+ const {
+ value: response,
+ responseHeaders,
+ rawValue: rawResponse
+ } = await postFormDataToApi({
+ url: this.config.url({
+ path: "/audio/transcriptions",
+ modelId: this.modelId
+ }),
+ headers: combineHeaders2(this.config.headers(), options.headers),
+ formData,
+ failedResponseHandler: groqFailedResponseHandler,
+ successfulResponseHandler: createJsonResponseHandler2(
+ groqTranscriptionResponseSchema
+ ),
+ abortSignal: options.abortSignal,
+ fetch: this.config.fetch
+ });
+ return {
+ text: response.text,
+ segments: (_e = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+ text: segment.text,
+ startSecond: segment.start,
+ endSecond: segment.end
+ }))) != null ? _e : [],
+ language: response.language,
+ durationInSeconds: response.duration,
+ warnings,
+ response: {
+ timestamp: currentDate,
+ modelId: this.modelId,
+ headers: responseHeaders,
+ body: rawResponse
+ }
+ };
+ }
+ };
+ var groqTranscriptionResponseSchema = z4.object({
+ task: z4.string(),
+ language: z4.string(),
+ duration: z4.number(),
+ text: z4.string(),
+ segments: z4.array(
+ z4.object({
+ id: z4.number(),
+ seek: z4.number(),
+ start: z4.number(),
+ end: z4.number(),
+ text: z4.string(),
+ tokens: z4.array(z4.number()),
+ temperature: z4.number(),
+ avg_logprob: z4.number(),
+ compression_ratio: z4.number(),
+ no_speech_prob: z4.number()
+ })
+ ),
+ x_groq: z4.object({
+ id: z4.string()
+ })
+ });
+
  // src/groq-provider.ts
  function createGroq(options = {}) {
  var _a;
@@ -649,22 +755,30 @@ function createGroq(options = {}) {
  })}`,
  ...options.headers
  });
- const createChatModel = (modelId, settings = {}) => new GroqChatLanguageModel(modelId, settings, {
+ const createChatModel = (modelId) => new GroqChatLanguageModel(modelId, {
  provider: "groq.chat",
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
  fetch: options.fetch
  });
- const createLanguageModel = (modelId, settings) => {
+ const createLanguageModel = (modelId) => {
  if (new.target) {
  throw new Error(
  "The Groq model function cannot be called with the new keyword."
  );
  }
- return createChatModel(modelId, settings);
+ return createChatModel(modelId);
+ };
+ const createTranscriptionModel = (modelId) => {
+ return new GroqTranscriptionModel(modelId, {
+ provider: "groq.transcription",
+ url: ({ path }) => `${baseURL}${path}`,
+ headers: getHeaders,
+ fetch: options.fetch
+ });
  };
- const provider = function(modelId, settings) {
- return createLanguageModel(modelId, settings);
+ const provider = function(modelId) {
+ return createLanguageModel(modelId);
  };
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;
@@ -674,6 +788,7 @@ function createGroq(options = {}) {
  provider.imageModel = (modelId) => {
  throw new NoSuchModelError({ modelId, modelType: "imageModel" });
  };
+ provider.transcription = createTranscriptionModel;
  return provider;
  }
  var groq = createGroq();