@ai-sdk/groq 0.0.0-70e0935a-20260114150030 → 0.0.0-98261322-20260122142521
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +43 -4
- package/dist/index.js +19 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +19 -5
- package/dist/index.mjs.map +1 -1
- package/docs/09-groq.mdx +478 -0
- package/package.json +10 -5
- package/src/convert-groq-usage.test.ts +237 -0
- package/src/convert-groq-usage.ts +64 -0
- package/src/convert-to-groq-chat-messages.test.ts +205 -0
- package/src/convert-to-groq-chat-messages.ts +147 -0
- package/src/get-response-metadata.ts +15 -0
- package/src/groq-api-types.ts +100 -0
- package/src/groq-browser-search-models.ts +26 -0
- package/src/groq-chat-language-model.test.ts +2028 -0
- package/src/groq-chat-language-model.ts +658 -0
- package/src/groq-chat-options.test.ts +110 -0
- package/src/groq-chat-options.ts +78 -0
- package/src/groq-config.ts +9 -0
- package/src/groq-error.ts +16 -0
- package/src/groq-prepare-tools.test.ts +272 -0
- package/src/groq-prepare-tools.ts +128 -0
- package/src/groq-provider.ts +143 -0
- package/src/groq-tools.ts +5 -0
- package/src/groq-transcription-model.test.ts +185 -0
- package/src/groq-transcription-model.ts +184 -0
- package/src/groq-transcription-options.ts +4 -0
- package/src/index.ts +5 -0
- package/src/map-groq-finish-reason.ts +19 -0
- package/src/tool/browser-search.ts +28 -0
- package/src/transcript-test.mp3 +0 -0
- package/src/version.ts +6 -0
package/src/groq-provider.ts
ADDED
@@ -0,0 +1,143 @@
import {
  LanguageModelV3,
  NoSuchModelError,
  ProviderV3,
  TranscriptionModelV3,
} from '@ai-sdk/provider';
import {
  FetchFunction,
  loadApiKey,
  withoutTrailingSlash,
  withUserAgentSuffix,
} from '@ai-sdk/provider-utils';
import { GroqChatLanguageModel } from './groq-chat-language-model';
import { GroqChatModelId } from './groq-chat-options';
import { GroqTranscriptionModelId } from './groq-transcription-options';
import { GroqTranscriptionModel } from './groq-transcription-model';

import { groqTools } from './groq-tools';
import { VERSION } from './version';
export interface GroqProvider extends ProviderV3 {
  /**
Creates a model for text generation.
*/
  (modelId: GroqChatModelId): LanguageModelV3;

  /**
Creates an Groq chat model for text generation.
*/
  languageModel(modelId: GroqChatModelId): LanguageModelV3;

  /**
Creates a model for transcription.
*/
  transcription(modelId: GroqTranscriptionModelId): TranscriptionModelV3;

  /**
   * Tools provided by Groq.
   */
  tools: typeof groqTools;

  /**
   * @deprecated Use `embeddingModel` instead.
   */
  textEmbeddingModel(modelId: string): never;
}

export interface GroqProviderSettings {
  /**
Base URL for the Groq API calls.
*/
  baseURL?: string;

  /**
API key for authenticating requests.
*/
  apiKey?: string;

  /**
Custom headers to include in the requests.
*/
  headers?: Record<string, string>;

  /**
Custom fetch implementation. You can use it as a middleware to intercept requests,
or to provide a custom fetch implementation for e.g. testing.
*/
  fetch?: FetchFunction;
}

/**
Create an Groq provider instance.
*/
export function createGroq(options: GroqProviderSettings = {}): GroqProvider {
  const baseURL =
    withoutTrailingSlash(options.baseURL) ?? 'https://api.groq.com/openai/v1';

  const getHeaders = () =>
    withUserAgentSuffix(
      {
        Authorization: `Bearer ${loadApiKey({
          apiKey: options.apiKey,
          environmentVariableName: 'GROQ_API_KEY',
          description: 'Groq',
        })}`,
        ...options.headers,
      },
      `ai-sdk/groq/${VERSION}`,
    );

  const createChatModel = (modelId: GroqChatModelId) =>
    new GroqChatLanguageModel(modelId, {
      provider: 'groq.chat',
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch,
    });

  const createLanguageModel = (modelId: GroqChatModelId) => {
    if (new.target) {
      throw new Error(
        'The Groq model function cannot be called with the new keyword.',
      );
    }

    return createChatModel(modelId);
  };

  const createTranscriptionModel = (modelId: GroqTranscriptionModelId) => {
    return new GroqTranscriptionModel(modelId, {
      provider: 'groq.transcription',
      url: ({ path }) => `${baseURL}${path}`,
      headers: getHeaders,
      fetch: options.fetch,
    });
  };

  const provider = function (modelId: GroqChatModelId) {
    return createLanguageModel(modelId);
  };

  provider.specificationVersion = 'v3' as const;
  provider.languageModel = createLanguageModel;
  provider.chat = createChatModel;

  provider.embeddingModel = (modelId: string) => {
    throw new NoSuchModelError({ modelId, modelType: 'embeddingModel' });
  };
  provider.textEmbeddingModel = provider.embeddingModel;
  provider.imageModel = (modelId: string) => {
    throw new NoSuchModelError({ modelId, modelType: 'imageModel' });
  };
  provider.transcription = createTranscriptionModel;
  provider.transcriptionModel = createTranscriptionModel;

  provider.tools = groqTools;

  return provider;
}

/**
Default Groq provider instance.
*/
export const groq = createGroq();
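For orientation, here is a minimal usage sketch of the factory defined above. Everything except the chat model id follows directly from this file; the chat model id is only an illustrative placeholder and not part of this diff.

// --- usage sketch (not part of the package diff) ---
import { createGroq } from '@ai-sdk/groq';

const groq = createGroq({
  // apiKey is optional: loadApiKey falls back to the GROQ_API_KEY env var.
  // baseURL defaults to 'https://api.groq.com/openai/v1'.
  headers: { 'x-custom-header': 'value' }, // merged into every request
});

// The provider is callable and returns a LanguageModelV3 for chat models.
const chatModel = groq('llama-3.3-70b-versatile'); // placeholder model id

// Transcription models come from the dedicated method.
const sttModel = groq.transcription('whisper-large-v3-turbo');

// Embedding and image models are not supported and throw NoSuchModelError:
// groq.embeddingModel('any-id'); // throws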
package/src/groq-transcription-model.test.ts
ADDED
@@ -0,0 +1,185 @@
import { createTestServer } from '@ai-sdk/test-server/with-vitest';
import { GroqTranscriptionModel } from './groq-transcription-model';
import { createGroq } from './groq-provider';
import { readFile } from 'node:fs/promises';
import path from 'node:path';
import { describe, it, expect, vi } from 'vitest';

vi.mock('./version', () => ({
  VERSION: '0.0.0-test',
}));

const audioData = await readFile(path.join(__dirname, 'transcript-test.mp3'));
const provider = createGroq({ apiKey: 'test-api-key' });
const model = provider.transcription('whisper-large-v3-turbo');

const server = createTestServer({
  'https://api.groq.com/openai/v1/audio/transcriptions': {},
});

describe('doGenerate', () => {
  function prepareJsonResponse({
    headers,
  }: {
    headers?: Record<string, string>;
  } = {}) {
    server.urls[
      'https://api.groq.com/openai/v1/audio/transcriptions'
    ].response = {
      type: 'json-value',
      headers,
      body: {
        task: 'transcribe',
        language: 'English',
        duration: 2.5,
        text: 'Hello world!',
        segments: [
          {
            id: 0,
            seek: 0,
            start: 0,
            end: 2.48,
            text: 'Hello world!',
            tokens: [50365, 2425, 490, 264],
            temperature: 0,
            avg_logprob: -0.29010406,
            compression_ratio: 0.7777778,
            no_speech_prob: 0.032802984,
          },
        ],
        x_groq: { id: 'req_01jrh9nn61f24rydqq1r4b3yg5' },
      },
    };
  }

  it('should pass the model', async () => {
    prepareJsonResponse();

    await model.doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
    });

    expect(await server.calls[0].requestBodyMultipart).toMatchObject({
      model: 'whisper-large-v3-turbo',
    });
  });

  it('should pass headers', async () => {
    prepareJsonResponse();

    const provider = createGroq({
      apiKey: 'test-api-key',
      headers: {
        'Custom-Provider-Header': 'provider-header-value',
      },
    });

    await provider.transcription('whisper-large-v3-turbo').doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
      headers: {
        'Custom-Request-Header': 'request-header-value',
      },
    });

    expect(server.calls[0].requestHeaders).toMatchObject({
      authorization: 'Bearer test-api-key',
      'content-type': expect.stringMatching(
        /^multipart\/form-data; boundary=----formdata-undici-\d+$/,
      ),
      'custom-provider-header': 'provider-header-value',
      'custom-request-header': 'request-header-value',
    });
    expect(server.calls[0].requestUserAgent).toContain(
      `ai-sdk/groq/0.0.0-test`,
    );
  });

  it('should extract the transcription text', async () => {
    prepareJsonResponse();

    const result = await model.doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
    });

    expect(result.text).toBe('Hello world!');
  });

  it('should include response data with timestamp, modelId and headers', async () => {
    prepareJsonResponse({
      headers: {
        'x-request-id': 'test-request-id',
        'x-ratelimit-remaining': '123',
      },
    });

    const testDate = new Date(0);
    const customModel = new GroqTranscriptionModel('whisper-large-v3-turbo', {
      provider: 'test-provider',
      url: () => 'https://api.groq.com/openai/v1/audio/transcriptions',
      headers: () => ({}),
      _internal: {
        currentDate: () => testDate,
      },
    });

    const result = await customModel.doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
    });

    expect(result.response).toMatchObject({
      timestamp: testDate,
      modelId: 'whisper-large-v3-turbo',
      headers: {
        'content-type': 'application/json',
        'x-request-id': 'test-request-id',
        'x-ratelimit-remaining': '123',
      },
    });
  });

  it('should use real date when no custom date provider is specified', async () => {
    prepareJsonResponse();

    const testDate = new Date(0);
    const customModel = new GroqTranscriptionModel('whisper-large-v3-turbo', {
      provider: 'test-provider',
      url: () => 'https://api.groq.com/openai/v1/audio/transcriptions',
      headers: () => ({}),
      _internal: {
        currentDate: () => testDate,
      },
    });

    const result = await customModel.doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
    });

    expect(result.response.timestamp.getTime()).toEqual(testDate.getTime());
    expect(result.response.modelId).toBe('whisper-large-v3-turbo');
  });

  it('should correctly pass provider options when they are an array', async () => {
    prepareJsonResponse();

    await model.doGenerate({
      audio: audioData,
      mediaType: 'audio/wav',
      providerOptions: {
        groq: {
          timestampGranularities: ['segment'],
          responseFormat: 'verbose_json',
        },
      },
    });

    expect(await server.calls[0].requestBodyMultipart).toMatchObject({
      'timestamp_granularities[]': 'segment',
      response_format: 'verbose_json',
    });
  });
});
package/src/groq-transcription-model.ts
ADDED
@@ -0,0 +1,184 @@
import { TranscriptionModelV3, SharedV3Warning } from '@ai-sdk/provider';
import {
  combineHeaders,
  convertBase64ToUint8Array,
  createJsonResponseHandler,
  mediaTypeToExtension,
  parseProviderOptions,
  postFormDataToApi,
} from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';
import { GroqConfig } from './groq-config';
import { groqFailedResponseHandler } from './groq-error';
import { GroqTranscriptionModelId } from './groq-transcription-options';
import { GroqTranscriptionAPITypes } from './groq-api-types';

// https://console.groq.com/docs/speech-to-text
const groqProviderOptionsSchema = z.object({
  language: z.string().nullish(),
  prompt: z.string().nullish(),
  responseFormat: z.string().nullish(),
  temperature: z.number().min(0).max(1).nullish(),
  timestampGranularities: z.array(z.string()).nullish(),
});

export type GroqTranscriptionCallOptions = z.infer<
  typeof groqProviderOptionsSchema
>;

interface GroqTranscriptionModelConfig extends GroqConfig {
  _internal?: {
    currentDate?: () => Date;
  };
}

export class GroqTranscriptionModel implements TranscriptionModelV3 {
  readonly specificationVersion = 'v3';

  get provider(): string {
    return this.config.provider;
  }

  constructor(
    readonly modelId: GroqTranscriptionModelId,
    private readonly config: GroqTranscriptionModelConfig,
  ) {}

  private async getArgs({
    audio,
    mediaType,
    providerOptions,
  }: Parameters<TranscriptionModelV3['doGenerate']>[0]) {
    const warnings: SharedV3Warning[] = [];

    // Parse provider options
    const groqOptions = await parseProviderOptions({
      provider: 'groq',
      providerOptions,
      schema: groqProviderOptionsSchema,
    });

    // Create form data with base fields
    const formData = new FormData();
    const blob =
      audio instanceof Uint8Array
        ? new Blob([audio])
        : new Blob([convertBase64ToUint8Array(audio)]);

    formData.append('model', this.modelId);
    const fileExtension = mediaTypeToExtension(mediaType);
    formData.append(
      'file',
      new File([blob], 'audio', { type: mediaType }),
      `audio.${fileExtension}`,
    );

    // Add provider-specific options
    if (groqOptions) {
      const transcriptionModelOptions: Omit<
        GroqTranscriptionAPITypes,
        'model'
      > = {
        language: groqOptions.language ?? undefined,
        prompt: groqOptions.prompt ?? undefined,
        response_format: groqOptions.responseFormat ?? undefined,
        temperature: groqOptions.temperature ?? undefined,
        timestamp_granularities:
          groqOptions.timestampGranularities ?? undefined,
      };

      for (const key in transcriptionModelOptions) {
        const value =
          transcriptionModelOptions[
            key as keyof Omit<GroqTranscriptionAPITypes, 'model'>
          ];
        if (value !== undefined) {
          if (Array.isArray(value)) {
            for (const item of value) {
              formData.append(`${key}[]`, String(item));
            }
          } else {
            formData.append(key, String(value));
          }
        }
      }
    }

    return {
      formData,
      warnings,
    };
  }

  async doGenerate(
    options: Parameters<TranscriptionModelV3['doGenerate']>[0],
  ): Promise<Awaited<ReturnType<TranscriptionModelV3['doGenerate']>>> {
    const currentDate = this.config._internal?.currentDate?.() ?? new Date();
    const { formData, warnings } = await this.getArgs(options);

    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse,
    } = await postFormDataToApi({
      url: this.config.url({
        path: '/audio/transcriptions',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      formData,
      failedResponseHandler: groqFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        groqTranscriptionResponseSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    return {
      text: response.text,
      segments:
        response.segments?.map(segment => ({
          text: segment.text,
          startSecond: segment.start,
          endSecond: segment.end,
        })) ?? [],
      language: response.language ?? undefined,
      durationInSeconds: response.duration ?? undefined,
      warnings,
      response: {
        timestamp: currentDate,
        modelId: this.modelId,
        headers: responseHeaders,
        body: rawResponse,
      },
    };
  }
}

const groqTranscriptionResponseSchema = z.object({
  text: z.string(),
  x_groq: z.object({
    id: z.string(),
  }),
  // additional properties are returned when `response_format: 'verbose_json'` is used
  task: z.string().nullish(),
  language: z.string().nullish(),
  duration: z.number().nullish(),
  segments: z
    .array(
      z.object({
        id: z.number(),
        seek: z.number(),
        start: z.number(),
        end: z.number(),
        text: z.string(),
        tokens: z.array(z.number()),
        temperature: z.number(),
        avg_logprob: z.number(),
        compression_ratio: z.number(),
        no_speech_prob: z.number(),
      }),
    )
    .nullish(),
});
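To make the option mapping above concrete, here is a small sketch of a direct doGenerate call in the style of the test file: camelCase provider options are validated against groqProviderOptionsSchema and appended to the multipart form as snake_case fields. The file path is a placeholder, and running this against the live API assumes GROQ_API_KEY is set.

// --- usage sketch (not part of the package diff) ---
import { readFile } from 'node:fs/promises';
import { createGroq } from '@ai-sdk/groq';

const groq = createGroq(); // reads GROQ_API_KEY from the environment
const model = groq.transcription('whisper-large-v3-turbo');

const result = await model.doGenerate({
  audio: await readFile('transcript-test.mp3'), // placeholder audio file
  mediaType: 'audio/mpeg',
  providerOptions: {
    groq: {
      // responseFormat -> response_format, timestampGranularities -> timestamp_granularities[]
      responseFormat: 'verbose_json',
      timestampGranularities: ['segment'],
    },
  },
});

console.log(result.text, result.durationInSeconds, result.segments);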
package/src/index.ts
ADDED
@@ -0,0 +1,5 @@
export { createGroq, groq } from './groq-provider';
export type { GroqProvider, GroqProviderSettings } from './groq-provider';
export type { GroqProviderOptions } from './groq-chat-options';
export { browserSearch } from './tool/browser-search';
export { VERSION } from './version';
package/src/map-groq-finish-reason.ts
ADDED
@@ -0,0 +1,19 @@
import { LanguageModelV3FinishReason } from '@ai-sdk/provider';

export function mapGroqFinishReason(
  finishReason: string | null | undefined,
): LanguageModelV3FinishReason['unified'] {
  switch (finishReason) {
    case 'stop':
      return 'stop';
    case 'length':
      return 'length';
    case 'content_filter':
      return 'content-filter';
    case 'function_call':
    case 'tool_calls':
      return 'tool-calls';
    default:
      return 'other';
  }
}
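A quick vitest-style check of the mapping above, mirroring the package's other tests; the test itself is illustrative and not part of the diff.

// --- illustrative test (not part of the package diff) ---
import { expect, it } from 'vitest';
import { mapGroqFinishReason } from './map-groq-finish-reason';

it('maps Groq finish reasons to unified values', () => {
  expect(mapGroqFinishReason('tool_calls')).toBe('tool-calls');
  expect(mapGroqFinishReason('content_filter')).toBe('content-filter');
  expect(mapGroqFinishReason(undefined)).toBe('other'); // unknown values fall through
});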
package/src/tool/browser-search.ts
ADDED
@@ -0,0 +1,28 @@
import { createProviderToolFactory } from '@ai-sdk/provider-utils';
import { z } from 'zod/v4';

/**
 * Browser search tool for Groq models.
 *
 * Provides interactive browser search capabilities that go beyond traditional web search
 * by navigating websites interactively and providing more detailed results.
 *
 * Currently supported on:
 * - openai/gpt-oss-20b
 * - openai/gpt-oss-120b
 *
 * @see https://console.groq.com/docs/browser-search
 */
export const browserSearch = createProviderToolFactory<
  {
    // Browser search doesn't take input parameters - it's controlled by the prompt
    // The tool is activated automatically when included in the tools array
  },
  {
    // No configuration options needed - the tool works automatically
    // when included in the tools array for supported models
  }
>({
  id: 'groq.browser_search',
  inputSchema: z.object({}),
});
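A hedged usage sketch for the tool above: the browserSearch export comes from index.ts in this diff, while generateText and the shape of the tools map belong to the AI SDK core ('ai' package), which is outside this package. The 'browser_search' key and the empty factory argument are assumptions based on the tool id and empty schemas shown above.

// --- usage sketch (not part of the package diff) ---
import { generateText } from 'ai'; // assumption: AI SDK core, not part of this package
import { groq, browserSearch } from '@ai-sdk/groq';

const { text } = await generateText({
  model: groq('openai/gpt-oss-120b'), // one of the supported models listed above
  tools: {
    // no input or config: the model triggers the search on its own when needed
    browser_search: browserSearch({}),
  },
  prompt: 'Summarize the latest updates on the Groq speech-to-text docs page.',
});

console.log(text);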
package/src/transcript-test.mp3
ADDED
Binary file (contents not shown)