@ai-sdk/groq 0.0.0-013d7476-20250808163325

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,860 @@
+ // src/groq-provider.ts
+ import {
+   NoSuchModelError
+ } from "@ai-sdk/provider";
+ import {
+   loadApiKey,
+   withoutTrailingSlash
+ } from "@ai-sdk/provider-utils";
+
+ // src/groq-chat-language-model.ts
+ import {
+   InvalidResponseDataError
+ } from "@ai-sdk/provider";
+ import {
+   combineHeaders,
+   createEventSourceResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParsableJson,
+   parseProviderOptions,
+   postJsonToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod/v4";
+
+ // src/convert-to-groq-chat-messages.ts
+ import {
+   UnsupportedFunctionalityError
+ } from "@ai-sdk/provider";
+ function convertToGroqChatMessages(prompt) {
+   const messages = [];
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case "system": {
+         messages.push({ role: "system", content });
+         break;
+       }
+       case "user": {
+         if (content.length === 1 && content[0].type === "text") {
+           messages.push({ role: "user", content: content[0].text });
+           break;
+         }
+         messages.push({
+           role: "user",
+           content: content.map((part) => {
+             switch (part.type) {
+               case "text": {
+                 return { type: "text", text: part.text };
+               }
+               case "file": {
+                 if (!part.mediaType.startsWith("image/")) {
+                   throw new UnsupportedFunctionalityError({
+                     functionality: "Non-image file content parts"
+                   });
+                 }
+                 const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
+                 return {
+                   type: "image_url",
+                   image_url: {
+                     url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`
+                   }
+                 };
+               }
+             }
+           })
+         });
+         break;
+       }
+       case "assistant": {
+         let text = "";
+         const toolCalls = [];
+         for (const part of content) {
+           switch (part.type) {
+             case "text": {
+               text += part.text;
+               break;
+             }
+             case "tool-call": {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: "function",
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.input)
+                 }
+               });
+               break;
+             }
+           }
+         }
+         messages.push({
+           role: "assistant",
+           content: text,
+           tool_calls: toolCalls.length > 0 ? toolCalls : void 0
+         });
+         break;
+       }
+       case "tool": {
+         for (const toolResponse of content) {
+           const output = toolResponse.output;
+           let contentValue;
+           switch (output.type) {
+             case "text":
+             case "error-text":
+               contentValue = output.value;
+               break;
+             case "content":
+             case "json":
+             case "error-json":
+               contentValue = JSON.stringify(output.value);
+               break;
+           }
+           messages.push({
+             role: "tool",
+             tool_call_id: toolResponse.toolCallId,
+             content: contentValue
+           });
+         }
+         break;
+       }
+       default: {
+         const _exhaustiveCheck = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+   return messages;
+ }
+
+ // src/get-response-metadata.ts
+ function getResponseMetadata({
+   id,
+   model,
+   created
+ }) {
+   return {
+     id: id != null ? id : void 0,
+     modelId: model != null ? model : void 0,
+     timestamp: created != null ? new Date(created * 1e3) : void 0
+   };
+ }
+
+ // src/groq-chat-options.ts
+ import { z } from "zod/v4";
+ var groqProviderOptions = z.object({
+   reasoningFormat: z.enum(["parsed", "raw", "hidden"]).optional(),
+   reasoningEffort: z.string().optional(),
+   /**
+    * Whether to enable parallel function calling during tool use. Default to true.
+    */
+   parallelToolCalls: z.boolean().optional(),
+   /**
+    * A unique identifier representing your end-user, which can help OpenAI to
+    * monitor and detect abuse. Learn more.
+    */
+   user: z.string().optional(),
+   /**
+    * Whether to use structured outputs.
+    *
+    * @default true
+    */
+   structuredOutputs: z.boolean().optional()
+ });
+
+ // src/groq-error.ts
+ import { z as z2 } from "zod/v4";
+ import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
+ var groqErrorDataSchema = z2.object({
+   error: z2.object({
+     message: z2.string(),
+     type: z2.string()
+   })
+ });
+ var groqFailedResponseHandler = createJsonErrorResponseHandler({
+   errorSchema: groqErrorDataSchema,
+   errorToMessage: (data) => data.error.message
+ });
+
+ // src/groq-prepare-tools.ts
+ import {
+   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
+ } from "@ai-sdk/provider";
+ function prepareTools({
+   tools,
+   toolChoice
+ }) {
+   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
+   const toolWarnings = [];
+   if (tools == null) {
+     return { tools: void 0, toolChoice: void 0, toolWarnings };
+   }
+   const groqTools = [];
+   for (const tool of tools) {
+     if (tool.type === "provider-defined") {
+       toolWarnings.push({ type: "unsupported-tool", tool });
+     } else {
+       groqTools.push({
+         type: "function",
+         function: {
+           name: tool.name,
+           description: tool.description,
+           parameters: tool.inputSchema
+         }
+       });
+     }
+   }
+   if (toolChoice == null) {
+     return { tools: groqTools, toolChoice: void 0, toolWarnings };
+   }
+   const type = toolChoice.type;
+   switch (type) {
+     case "auto":
+     case "none":
+     case "required":
+       return { tools: groqTools, toolChoice: type, toolWarnings };
+     case "tool":
+       return {
+         tools: groqTools,
+         toolChoice: {
+           type: "function",
+           function: {
+             name: toolChoice.toolName
+           }
+         },
+         toolWarnings
+       };
+     default: {
+       const _exhaustiveCheck = type;
+       throw new UnsupportedFunctionalityError2({
+         functionality: `tool choice type: ${_exhaustiveCheck}`
+       });
+     }
+   }
+ }
+
+ // src/map-groq-finish-reason.ts
+ function mapGroqFinishReason(finishReason) {
+   switch (finishReason) {
+     case "stop":
+       return "stop";
+     case "length":
+       return "length";
+     case "content_filter":
+       return "content-filter";
+     case "function_call":
+     case "tool_calls":
+       return "tool-calls";
+     default:
+       return "unknown";
+   }
+ }
+
+ // src/groq-chat-language-model.ts
+ var GroqChatLanguageModel = class {
+   constructor(modelId, config) {
+     this.specificationVersion = "v2";
+     this.supportedUrls = {
+       "image/*": [/^https?:\/\/.*$/]
+     };
+     this.modelId = modelId;
+     this.config = config;
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     prompt,
+     maxOutputTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     stopSequences,
+     responseFormat,
+     seed,
+     stream,
+     tools,
+     toolChoice,
+     providerOptions
+   }) {
+     var _a, _b;
+     const warnings = [];
+     const groqOptions = await parseProviderOptions({
+       provider: "groq",
+       providerOptions,
+       schema: groqProviderOptions
+     });
+     const structuredOutputs = (_a = groqOptions == null ? void 0 : groqOptions.structuredOutputs) != null ? _a : true;
+     if (topK != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "topK"
+       });
+     }
+     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format schema is only supported with structuredOutputs"
+       });
+     }
+     const {
+       tools: groqTools,
+       toolChoice: groqToolChoice,
+       toolWarnings
+     } = prepareTools({ tools, toolChoice });
+     return {
+       args: {
+         // model id:
+         model: this.modelId,
+         // model specific settings:
+         user: groqOptions == null ? void 0 : groqOptions.user,
+         parallel_tool_calls: groqOptions == null ? void 0 : groqOptions.parallelToolCalls,
+         // standardized settings:
+         max_tokens: maxOutputTokens,
+         temperature,
+         top_p: topP,
+         frequency_penalty: frequencyPenalty,
+         presence_penalty: presencePenalty,
+         stop: stopSequences,
+         seed,
+         // response format:
+         response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+           type: "json_schema",
+           json_schema: {
+             schema: responseFormat.schema,
+             name: (_b = responseFormat.name) != null ? _b : "response",
+             description: responseFormat.description
+           }
+         } : { type: "json_object" } : void 0,
+         // provider options:
+         reasoning_format: groqOptions == null ? void 0 : groqOptions.reasoningFormat,
+         reasoning_effort: groqOptions == null ? void 0 : groqOptions.reasoningEffort,
+         // messages:
+         messages: convertToGroqChatMessages(prompt),
+         // tools:
+         tools: groqTools,
+         tool_choice: groqToolChoice
+       },
+       warnings: [...warnings, ...toolWarnings]
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e, _f, _g;
+     const { args, warnings } = await this.getArgs({
+       ...options,
+       stream: false
+     });
+     const body = JSON.stringify(args);
+     const {
+       responseHeaders,
+       value: response,
+       rawValue: rawResponse
+     } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: groqFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         groqChatResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const choice = response.choices[0];
+     const content = [];
+     const text = choice.message.content;
+     if (text != null && text.length > 0) {
+       content.push({ type: "text", text });
+     }
+     const reasoning = choice.message.reasoning;
+     if (reasoning != null && reasoning.length > 0) {
+       content.push({
+         type: "reasoning",
+         text: reasoning
+       });
+     }
+     if (choice.message.tool_calls != null) {
+       for (const toolCall of choice.message.tool_calls) {
+         content.push({
+           type: "tool-call",
+           toolCallId: (_a = toolCall.id) != null ? _a : generateId(),
+           toolName: toolCall.function.name,
+           input: toolCall.function.arguments
+         });
+       }
+     }
+     return {
+       content,
+       finishReason: mapGroqFinishReason(choice.finish_reason),
+       usage: {
+         inputTokens: (_c = (_b = response.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
+         outputTokens: (_e = (_d = response.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0,
+         totalTokens: (_g = (_f = response.usage) == null ? void 0 : _f.total_tokens) != null ? _g : void 0
+       },
+       response: {
+         ...getResponseMetadata(response),
+         headers: responseHeaders,
+         body: rawResponse
+       },
+       warnings,
+       request: { body }
+     };
+   }
+   async doStream(options) {
+     const { args, warnings } = await this.getArgs({ ...options, stream: true });
+     const body = JSON.stringify({ ...args, stream: true });
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: {
+         ...args,
+         stream: true
+       },
+       failedResponseHandler: groqFailedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(groqChatChunkSchema),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     const toolCalls = [];
+     let finishReason = "unknown";
+     const usage = {
+       inputTokens: void 0,
+       outputTokens: void 0,
+       totalTokens: void 0
+     };
+     let isFirstChunk = true;
+     let isActiveText = false;
+     let isActiveReasoning = false;
+     let providerMetadata;
+     return {
+       stream: response.pipeThrough(
+         new TransformStream({
+           start(controller) {
+             controller.enqueue({ type: "stream-start", warnings });
+           },
+           transform(chunk, controller) {
+             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+             if (options.includeRawChunks) {
+               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+             }
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: value.error });
+               return;
+             }
+             if (isFirstChunk) {
+               isFirstChunk = false;
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value)
+               });
+             }
+             if (((_a = value.x_groq) == null ? void 0 : _a.usage) != null) {
+               usage.inputTokens = (_b = value.x_groq.usage.prompt_tokens) != null ? _b : void 0;
+               usage.outputTokens = (_c = value.x_groq.usage.completion_tokens) != null ? _c : void 0;
+               usage.totalTokens = (_d = value.x_groq.usage.total_tokens) != null ? _d : void 0;
+             }
+             const choice = value.choices[0];
+             if ((choice == null ? void 0 : choice.finish_reason) != null) {
+               finishReason = mapGroqFinishReason(choice.finish_reason);
+             }
+             if ((choice == null ? void 0 : choice.delta) == null) {
+               return;
+             }
+             const delta = choice.delta;
+             if (delta.reasoning != null && delta.reasoning.length > 0) {
+               if (!isActiveReasoning) {
+                 controller.enqueue({
+                   type: "reasoning-start",
+                   id: "reasoning-0"
+                 });
+                 isActiveReasoning = true;
+               }
+               controller.enqueue({
+                 type: "reasoning-delta",
+                 id: "reasoning-0",
+                 delta: delta.reasoning
+               });
+             }
+             if (delta.content != null && delta.content.length > 0) {
+               if (!isActiveText) {
+                 controller.enqueue({ type: "text-start", id: "txt-0" });
+                 isActiveText = true;
+               }
+               controller.enqueue({
+                 type: "text-delta",
+                 id: "txt-0",
+                 delta: delta.content
+               });
+             }
+             if (delta.tool_calls != null) {
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.type !== "function") {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function' type.`
+                     });
+                   }
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`
+                     });
+                   }
+                   if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`
+                     });
+                   }
+                   controller.enqueue({
+                     type: "tool-input-start",
+                     id: toolCallDelta.id,
+                     toolName: toolCallDelta.function.name
+                   });
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
+                     },
+                     hasFinished: false
+                   };
+                   const toolCall2 = toolCalls[index];
+                   if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
+                     if (toolCall2.function.arguments.length > 0) {
+                       controller.enqueue({
+                         type: "tool-input-delta",
+                         id: toolCall2.id,
+                         delta: toolCall2.function.arguments
+                       });
+                     }
+                     if (isParsableJson(toolCall2.function.arguments)) {
+                       controller.enqueue({
+                         type: "tool-input-end",
+                         id: toolCall2.id
+                       });
+                       controller.enqueue({
+                         type: "tool-call",
+                         toolCallId: (_i = toolCall2.id) != null ? _i : generateId(),
+                         toolName: toolCall2.function.name,
+                         input: toolCall2.function.arguments
+                       });
+                       toolCall2.hasFinished = true;
+                     }
+                   }
+                   continue;
+                 }
+                 const toolCall = toolCalls[index];
+                 if (toolCall.hasFinished) {
+                   continue;
+                 }
+                 if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
+                   toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
+                 }
+                 controller.enqueue({
+                   type: "tool-input-delta",
+                   id: toolCall.id,
+                   delta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
+                 });
+                 if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                   controller.enqueue({
+                     type: "tool-input-end",
+                     id: toolCall.id
+                   });
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallId: (_p = toolCall.id) != null ? _p : generateId(),
+                     toolName: toolCall.function.name,
+                     input: toolCall.function.arguments
+                   });
+                   toolCall.hasFinished = true;
+                 }
+               }
+             }
+           },
+           flush(controller) {
+             if (isActiveReasoning) {
+               controller.enqueue({ type: "reasoning-end", id: "reasoning-0" });
+             }
+             if (isActiveText) {
+               controller.enqueue({ type: "text-end", id: "txt-0" });
+             }
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage,
+               ...providerMetadata != null ? { providerMetadata } : {}
+             });
+           }
+         })
+       ),
+       request: { body },
+       response: { headers: responseHeaders }
+     };
+   }
+ };
+ var groqChatResponseSchema = z3.object({
+   id: z3.string().nullish(),
+   created: z3.number().nullish(),
+   model: z3.string().nullish(),
+   choices: z3.array(
+     z3.object({
+       message: z3.object({
+         content: z3.string().nullish(),
+         reasoning: z3.string().nullish(),
+         tool_calls: z3.array(
+           z3.object({
+             id: z3.string().nullish(),
+             type: z3.literal("function"),
+             function: z3.object({
+               name: z3.string(),
+               arguments: z3.string()
+             })
+           })
+         ).nullish()
+       }),
+       index: z3.number(),
+       finish_reason: z3.string().nullish()
+     })
+   ),
+   usage: z3.object({
+     prompt_tokens: z3.number().nullish(),
+     completion_tokens: z3.number().nullish(),
+     total_tokens: z3.number().nullish()
+   }).nullish()
+ });
+ var groqChatChunkSchema = z3.union([
+   z3.object({
+     id: z3.string().nullish(),
+     created: z3.number().nullish(),
+     model: z3.string().nullish(),
+     choices: z3.array(
+       z3.object({
+         delta: z3.object({
+           content: z3.string().nullish(),
+           reasoning: z3.string().nullish(),
+           tool_calls: z3.array(
+             z3.object({
+               index: z3.number(),
+               id: z3.string().nullish(),
+               type: z3.literal("function").optional(),
+               function: z3.object({
+                 name: z3.string().nullish(),
+                 arguments: z3.string().nullish()
+               })
+             })
+           ).nullish()
+         }).nullish(),
+         finish_reason: z3.string().nullable().optional(),
+         index: z3.number()
+       })
+     ),
+     x_groq: z3.object({
+       usage: z3.object({
+         prompt_tokens: z3.number().nullish(),
+         completion_tokens: z3.number().nullish(),
+         total_tokens: z3.number().nullish()
+       }).nullish()
+     }).nullish()
+   }),
+   groqErrorDataSchema
+ ]);
+
+ // src/groq-transcription-model.ts
+ import {
+   combineHeaders as combineHeaders2,
+   convertBase64ToUint8Array,
+   createJsonResponseHandler as createJsonResponseHandler2,
+   parseProviderOptions as parseProviderOptions2,
+   postFormDataToApi
+ } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod/v4";
+ var groqProviderOptionsSchema = z4.object({
+   language: z4.string().nullish(),
+   prompt: z4.string().nullish(),
+   responseFormat: z4.string().nullish(),
+   temperature: z4.number().min(0).max(1).nullish(),
+   timestampGranularities: z4.array(z4.string()).nullish()
+ });
+ var GroqTranscriptionModel = class {
+   constructor(modelId, config) {
+     this.modelId = modelId;
+     this.config = config;
+     this.specificationVersion = "v2";
+   }
+   get provider() {
+     return this.config.provider;
+   }
+   async getArgs({
+     audio,
+     mediaType,
+     providerOptions
+   }) {
+     var _a, _b, _c, _d, _e;
+     const warnings = [];
+     const groqOptions = await parseProviderOptions2({
+       provider: "groq",
+       providerOptions,
+       schema: groqProviderOptionsSchema
+     });
+     const formData = new FormData();
+     const blob = audio instanceof Uint8Array ? new Blob([audio]) : new Blob([convertBase64ToUint8Array(audio)]);
+     formData.append("model", this.modelId);
+     formData.append("file", new File([blob], "audio", { type: mediaType }));
+     if (groqOptions) {
+       const transcriptionModelOptions = {
+         language: (_a = groqOptions.language) != null ? _a : void 0,
+         prompt: (_b = groqOptions.prompt) != null ? _b : void 0,
+         response_format: (_c = groqOptions.responseFormat) != null ? _c : void 0,
+         temperature: (_d = groqOptions.temperature) != null ? _d : void 0,
+         timestamp_granularities: (_e = groqOptions.timestampGranularities) != null ? _e : void 0
+       };
+       for (const key in transcriptionModelOptions) {
+         const value = transcriptionModelOptions[key];
+         if (value !== void 0) {
+           formData.append(key, String(value));
+         }
+       }
+     }
+     return {
+       formData,
+       warnings
+     };
+   }
+   async doGenerate(options) {
+     var _a, _b, _c, _d, _e;
+     const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+     const { formData, warnings } = await this.getArgs(options);
+     const {
+       value: response,
+       responseHeaders,
+       rawValue: rawResponse
+     } = await postFormDataToApi({
+       url: this.config.url({
+         path: "/audio/transcriptions",
+         modelId: this.modelId
+       }),
+       headers: combineHeaders2(this.config.headers(), options.headers),
+       formData,
+       failedResponseHandler: groqFailedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler2(
+         groqTranscriptionResponseSchema
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       text: response.text,
+       segments: (_e = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+         text: segment.text,
+         startSecond: segment.start,
+         endSecond: segment.end
+       }))) != null ? _e : [],
+       language: response.language,
+       durationInSeconds: response.duration,
+       warnings,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders,
+         body: rawResponse
+       }
+     };
+   }
+ };
+ var groqTranscriptionResponseSchema = z4.object({
+   task: z4.string(),
+   language: z4.string(),
+   duration: z4.number(),
+   text: z4.string(),
+   segments: z4.array(
+     z4.object({
+       id: z4.number(),
+       seek: z4.number(),
+       start: z4.number(),
+       end: z4.number(),
+       text: z4.string(),
+       tokens: z4.array(z4.number()),
+       temperature: z4.number(),
+       avg_logprob: z4.number(),
+       compression_ratio: z4.number(),
+       no_speech_prob: z4.number()
+     })
+   ),
+   x_groq: z4.object({
+     id: z4.string()
+   })
+ });
+
+ // src/groq-provider.ts
+ function createGroq(options = {}) {
+   var _a;
+   const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://api.groq.com/openai/v1";
+   const getHeaders = () => ({
+     Authorization: `Bearer ${loadApiKey({
+       apiKey: options.apiKey,
+       environmentVariableName: "GROQ_API_KEY",
+       description: "Groq"
+     })}`,
+     ...options.headers
+   });
+   const createChatModel = (modelId) => new GroqChatLanguageModel(modelId, {
+     provider: "groq.chat",
+     url: ({ path }) => `${baseURL}${path}`,
+     headers: getHeaders,
+     fetch: options.fetch
+   });
+   const createLanguageModel = (modelId) => {
+     if (new.target) {
+       throw new Error(
+         "The Groq model function cannot be called with the new keyword."
+       );
+     }
+     return createChatModel(modelId);
+   };
+   const createTranscriptionModel = (modelId) => {
+     return new GroqTranscriptionModel(modelId, {
+       provider: "groq.transcription",
+       url: ({ path }) => `${baseURL}${path}`,
+       headers: getHeaders,
+       fetch: options.fetch
+     });
+   };
+   const provider = function(modelId) {
+     return createLanguageModel(modelId);
+   };
+   provider.languageModel = createLanguageModel;
+   provider.chat = createChatModel;
+   provider.textEmbeddingModel = (modelId) => {
+     throw new NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
+   };
+   provider.imageModel = (modelId) => {
+     throw new NoSuchModelError({ modelId, modelType: "imageModel" });
+   };
+   provider.transcription = createTranscriptionModel;
+   return provider;
+ }
+ var groq = createGroq();
+ export {
+   createGroq,
+   groq
+ };
+ //# sourceMappingURL=index.mjs.map
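
Usage note (not part of the published file above): a minimal sketch of how the two exports might be consumed, based only on the createGroq options and provider methods visible in this bundle. The model ids are illustrative placeholders, not values defined by the package.

// Minimal usage sketch; assumes this bundle is installed as "@ai-sdk/groq".
// Model ids below are placeholders, not defined by the package.
import { createGroq, groq } from "@ai-sdk/groq";

// The default `groq` instance resolves its key from the GROQ_API_KEY
// environment variable through loadApiKey (see createGroq above).
const defaultModel = groq("example-chat-model-id");

// A custom instance accepts the apiKey, baseURL, headers, and fetch options
// handled by createGroq; chat and transcription models share the same headers.
const customGroq = createGroq({ apiKey: process.env.GROQ_API_KEY });
const chatModel = customGroq.chat("example-chat-model-id");
const transcriptionModel = customGroq.transcription("example-transcription-model-id");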