@ai-sdk/groq 3.0.12 → 3.0.13

package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # @ai-sdk/groq
 
+ ## 3.0.13
+
+ ### Patch Changes
+
+ - 8dc54db: chore: add src folders to package bundle
+
  ## 3.0.12
 
  ### Patch Changes
package/dist/index.js CHANGED
@@ -973,7 +973,7 @@ var groqTools = {
  };
 
  // src/version.ts
- var VERSION = true ? "3.0.12" : "0.0.0-test";
+ var VERSION = true ? "3.0.13" : "0.0.0-test";
 
  // src/groq-provider.ts
  function createGroq(options = {}) {
package/dist/index.mjs CHANGED
@@ -971,7 +971,7 @@ var groqTools = {
  };
 
  // src/version.ts
- var VERSION = true ? "3.0.12" : "0.0.0-test";
+ var VERSION = true ? "3.0.13" : "0.0.0-test";
 
  // src/groq-provider.ts
  function createGroq(options = {}) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@ai-sdk/groq",
-   "version": "3.0.12",
+   "version": "3.0.13",
    "license": "Apache-2.0",
    "sideEffects": false,
    "main": "./dist/index.js",
@@ -8,6 +8,7 @@
    "types": "./dist/index.d.ts",
    "files": [
      "dist/**/*",
+     "src",
      "CHANGELOG.md",
      "README.md"
    ],
@@ -28,8 +29,8 @@
      "tsup": "^8",
      "typescript": "5.8.3",
      "zod": "3.25.76",
-     "@vercel/ai-tsconfig": "0.0.0",
-     "@ai-sdk/test-server": "1.0.1"
+     "@ai-sdk/test-server": "1.0.2",
+     "@vercel/ai-tsconfig": "0.0.0"
    },
    "peerDependencies": {
      "zod": "^3.25.76 || ^4.1.8"
package/src/convert-groq-usage.test.ts ADDED
@@ -0,0 +1,237 @@
+ import { describe, it, expect } from 'vitest';
+ import { convertGroqUsage } from './convert-groq-usage';
+
+ describe('convertGroqUsage', () => {
+   it('should return undefined values when usage is null', () => {
+     const result = convertGroqUsage(null);
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: undefined,
+         noCache: undefined,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: undefined,
+         text: undefined,
+         reasoning: undefined,
+       },
+       raw: undefined,
+     });
+   });
+
+   it('should return undefined values when usage is undefined', () => {
+     const result = convertGroqUsage(undefined);
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: undefined,
+         noCache: undefined,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: undefined,
+         text: undefined,
+         reasoning: undefined,
+       },
+       raw: undefined,
+     });
+   });
+
+   it('should convert basic usage without token details', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 20,
+       completion_tokens: 10,
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 20,
+         noCache: 20,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 10,
+         text: 10,
+         reasoning: undefined,
+       },
+       raw: {
+         prompt_tokens: 20,
+         completion_tokens: 10,
+       },
+     });
+   });
+
+   it('should extract reasoning tokens from completion_tokens_details', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 79,
+       completion_tokens: 40,
+       completion_tokens_details: {
+         reasoning_tokens: 21,
+       },
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 79,
+         noCache: 79,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 40,
+         text: 19, // 40 - 21 = 19
+         reasoning: 21,
+       },
+       raw: {
+         prompt_tokens: 79,
+         completion_tokens: 40,
+         completion_tokens_details: {
+           reasoning_tokens: 21,
+         },
+       },
+     });
+   });
+
+   it('should handle null reasoning_tokens in completion_tokens_details', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 20,
+       completion_tokens: 10,
+       completion_tokens_details: {
+         reasoning_tokens: null,
+       },
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 20,
+         noCache: 20,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 10,
+         text: 10,
+         reasoning: undefined,
+       },
+       raw: {
+         prompt_tokens: 20,
+         completion_tokens: 10,
+         completion_tokens_details: {
+           reasoning_tokens: null,
+         },
+       },
+     });
+   });
+
+   it('should handle null completion_tokens_details', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 20,
+       completion_tokens: 10,
+       completion_tokens_details: null,
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 20,
+         noCache: 20,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 10,
+         text: 10,
+         reasoning: undefined,
+       },
+       raw: {
+         prompt_tokens: 20,
+         completion_tokens: 10,
+         completion_tokens_details: null,
+       },
+     });
+   });
+
+   it('should handle zero reasoning tokens', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 20,
+       completion_tokens: 10,
+       completion_tokens_details: {
+         reasoning_tokens: 0,
+       },
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 20,
+         noCache: 20,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 10,
+         text: 10,
+         reasoning: 0,
+       },
+       raw: {
+         prompt_tokens: 20,
+         completion_tokens: 10,
+         completion_tokens_details: {
+           reasoning_tokens: 0,
+         },
+       },
+     });
+   });
+
+   it('should handle all tokens being reasoning tokens', () => {
+     const result = convertGroqUsage({
+       prompt_tokens: 20,
+       completion_tokens: 50,
+       completion_tokens_details: {
+         reasoning_tokens: 50,
+       },
+     });
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 20,
+         noCache: 20,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 50,
+         text: 0, // 50 - 50 = 0
+         reasoning: 50,
+       },
+       raw: {
+         prompt_tokens: 20,
+         completion_tokens: 50,
+         completion_tokens_details: {
+           reasoning_tokens: 50,
+         },
+       },
+     });
+   });
+
+   it('should handle missing prompt_tokens and completion_tokens', () => {
+     const result = convertGroqUsage({});
+
+     expect(result).toStrictEqual({
+       inputTokens: {
+         total: 0,
+         noCache: 0,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: 0,
+         text: 0,
+         reasoning: undefined,
+       },
+       raw: {},
+     });
+   });
+ });
package/src/convert-groq-usage.ts ADDED
@@ -0,0 +1,64 @@
+ import { LanguageModelV3Usage } from '@ai-sdk/provider';
+
+ export function convertGroqUsage(
+   usage:
+     | {
+         prompt_tokens?: number | null | undefined;
+         completion_tokens?: number | null | undefined;
+         prompt_tokens_details?:
+           | {
+               cached_tokens?: number | null | undefined;
+             }
+           | null
+           | undefined;
+         completion_tokens_details?:
+           | {
+               reasoning_tokens?: number | null | undefined;
+             }
+           | null
+           | undefined;
+       }
+     | undefined
+     | null,
+ ): LanguageModelV3Usage {
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: undefined,
+         noCache: undefined,
+         cacheRead: undefined,
+         cacheWrite: undefined,
+       },
+       outputTokens: {
+         total: undefined,
+         text: undefined,
+         reasoning: undefined,
+       },
+       raw: undefined,
+     };
+   }
+
+   const promptTokens = usage.prompt_tokens ?? 0;
+   const completionTokens = usage.completion_tokens ?? 0;
+   const reasoningTokens =
+     usage.completion_tokens_details?.reasoning_tokens ?? undefined;
+   const textTokens =
+     reasoningTokens != null
+       ? completionTokens - reasoningTokens
+       : completionTokens;
+
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens,
+       cacheRead: undefined,
+       cacheWrite: undefined,
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: textTokens,
+       reasoning: reasoningTokens,
+     },
+     raw: usage,
+   };
+ }
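As an aside, a minimal usage sketch of the convertGroqUsage helper added above (illustrative only; the input and expected values mirror the reasoning-token test case earlier in this diff):

import { convertGroqUsage } from './convert-groq-usage';

// Raw usage block as returned by the Groq chat completions API.
const usage = convertGroqUsage({
  prompt_tokens: 79,
  completion_tokens: 40,
  completion_tokens_details: { reasoning_tokens: 21 },
});

// Text tokens are completion_tokens minus reasoning_tokens (40 - 21 = 19);
// cacheRead/cacheWrite stay undefined because Groq does not report them here.
console.log(usage.inputTokens.total); // 79
console.log(usage.outputTokens.reasoning); // 21
console.log(usage.outputTokens.text); // 19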
package/src/convert-to-groq-chat-messages.test.ts ADDED
@@ -0,0 +1,205 @@
+ import { convertToGroqChatMessages } from './convert-to-groq-chat-messages';
+ import { describe, it, expect } from 'vitest';
+
+ describe('user messages', () => {
+   it('should convert messages with image parts', async () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'user',
+         content: [
+           { type: 'text', text: 'Hello' },
+           {
+             type: 'file',
+             data: 'AAECAw==',
+             mediaType: 'image/png',
+           },
+         ],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": [
+             {
+               "text": "Hello",
+               "type": "text",
+             },
+             {
+               "image_url": {
+                 "url": "data:image/png;base64,AAECAw==",
+               },
+               "type": "image_url",
+             },
+           ],
+           "role": "user",
+         },
+       ]
+     `);
+   });
+
+   it('should convert messages with image parts from Uint8Array', async () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'user',
+         content: [
+           { type: 'text', text: 'Hi' },
+           {
+             type: 'file',
+             data: new Uint8Array([0, 1, 2, 3]),
+             mediaType: 'image/png',
+           },
+         ],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": [
+             {
+               "text": "Hi",
+               "type": "text",
+             },
+             {
+               "image_url": {
+                 "url": "data:image/png;base64,AAECAw==",
+               },
+               "type": "image_url",
+             },
+           ],
+           "role": "user",
+         },
+       ]
+     `);
+   });
+
+   it('should convert messages with only a text part to a string content', async () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'user',
+         content: [{ type: 'text', text: 'Hello' }],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": "Hello",
+           "role": "user",
+         },
+       ]
+     `);
+   });
+ });
+
+ describe('tool calls', () => {
+   it('should stringify arguments to tool calls', () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'assistant',
+         content: [
+           {
+             type: 'tool-call',
+             input: { foo: 'bar123' },
+             toolCallId: 'quux',
+             toolName: 'thwomp',
+           },
+         ],
+       },
+       {
+         role: 'tool',
+         content: [
+           {
+             type: 'tool-result',
+             toolCallId: 'quux',
+             toolName: 'thwomp',
+             output: { type: 'json', value: { oof: '321rab' } },
+           },
+         ],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": "",
+           "role": "assistant",
+           "tool_calls": [
+             {
+               "function": {
+                 "arguments": "{"foo":"bar123"}",
+                 "name": "thwomp",
+               },
+               "id": "quux",
+               "type": "function",
+             },
+           ],
+         },
+         {
+           "content": "{"oof":"321rab"}",
+           "role": "tool",
+           "tool_call_id": "quux",
+         },
+       ]
+     `);
+   });
+
+   it('should send reasoning if present', () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'assistant',
+         content: [
+           {
+             type: 'reasoning',
+             text: 'I think the tool will return the correct value.',
+           },
+           {
+             type: 'tool-call',
+             input: { foo: 'bar123' },
+             toolCallId: 'quux',
+             toolName: 'thwomp',
+           },
+         ],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": "",
+           "reasoning": "I think the tool will return the correct value.",
+           "role": "assistant",
+           "tool_calls": [
+             {
+               "function": {
+                 "arguments": "{"foo":"bar123"}",
+                 "name": "thwomp",
+               },
+               "id": "quux",
+               "type": "function",
+             },
+           ],
+         },
+       ]
+     `);
+   });
+
+   it('should not include reasoning field when no reasoning content is present', () => {
+     const result = convertToGroqChatMessages([
+       {
+         role: 'assistant',
+         content: [{ type: 'text', text: 'Hello, how can I help you?' }],
+       },
+     ]);
+
+     expect(result).toMatchInlineSnapshot(`
+       [
+         {
+           "content": "Hello, how can I help you?",
+           "role": "assistant",
+         },
+       ]
+     `);
+   });
+ });
package/src/convert-to-groq-chat-messages.ts ADDED
@@ -0,0 +1,147 @@
+ import {
+   LanguageModelV3Prompt,
+   UnsupportedFunctionalityError,
+ } from '@ai-sdk/provider';
+ import { GroqChatPrompt } from './groq-api-types';
+ import { convertToBase64 } from '@ai-sdk/provider-utils';
+
+ export function convertToGroqChatMessages(
+   prompt: LanguageModelV3Prompt,
+ ): GroqChatPrompt {
+   const messages: GroqChatPrompt = [];
+
+   for (const { role, content } of prompt) {
+     switch (role) {
+       case 'system': {
+         messages.push({ role: 'system', content });
+         break;
+       }
+
+       case 'user': {
+         if (content.length === 1 && content[0].type === 'text') {
+           messages.push({ role: 'user', content: content[0].text });
+           break;
+         }
+
+         messages.push({
+           role: 'user',
+           content: content.map(part => {
+             switch (part.type) {
+               case 'text': {
+                 return { type: 'text', text: part.text };
+               }
+               case 'file': {
+                 if (!part.mediaType.startsWith('image/')) {
+                   throw new UnsupportedFunctionalityError({
+                     functionality: 'Non-image file content parts',
+                   });
+                 }
+
+                 const mediaType =
+                   part.mediaType === 'image/*' ? 'image/jpeg' : part.mediaType;
+
+                 return {
+                   type: 'image_url',
+                   image_url: {
+                     url:
+                       part.data instanceof URL
+                         ? part.data.toString()
+                         : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                   },
+                 };
+               }
+             }
+           }),
+         });
+
+         break;
+       }
+
+       case 'assistant': {
+         let text = '';
+         let reasoning = '';
+         const toolCalls: Array<{
+           id: string;
+           type: 'function';
+           function: { name: string; arguments: string };
+         }> = [];
+
+         for (const part of content) {
+           switch (part.type) {
+             // groq supports reasoning for tool-calls in multi-turn conversations
+             // https://github.com/vercel/ai/issues/7860
+             case 'reasoning': {
+               reasoning += part.text;
+               break;
+             }
+
+             case 'text': {
+               text += part.text;
+               break;
+             }
+
+             case 'tool-call': {
+               toolCalls.push({
+                 id: part.toolCallId,
+                 type: 'function',
+                 function: {
+                   name: part.toolName,
+                   arguments: JSON.stringify(part.input),
+                 },
+               });
+               break;
+             }
+           }
+         }
+
+         messages.push({
+           role: 'assistant',
+           content: text,
+           ...(reasoning.length > 0 ? { reasoning } : null),
+           ...(toolCalls.length > 0 ? { tool_calls: toolCalls } : null),
+         });
+
+         break;
+       }
+
+       case 'tool': {
+         for (const toolResponse of content) {
+           if (toolResponse.type === 'tool-approval-response') {
+             continue;
+           }
+           const output = toolResponse.output;
+
+           let contentValue: string;
+           switch (output.type) {
+             case 'text':
+             case 'error-text':
+               contentValue = output.value;
+               break;
+             case 'execution-denied':
+               contentValue = output.reason ?? 'Tool execution denied.';
+               break;
+             case 'content':
+             case 'json':
+             case 'error-json':
+               contentValue = JSON.stringify(output.value);
+               break;
+           }
+
+           messages.push({
+             role: 'tool',
+             tool_call_id: toolResponse.toolCallId,
+             content: contentValue,
+           });
+         }
+         break;
+       }
+
+       default: {
+         const _exhaustiveCheck: never = role;
+         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+       }
+     }
+   }
+
+   return messages;
+ }
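Similarly, a small illustrative sketch of the message conversion added above (input and output shapes mirror the snapshot tests in this diff):

import { convertToGroqChatMessages } from './convert-to-groq-chat-messages';

// A user message with a single text part collapses to plain string content,
// as covered by the 'only a text part' test above.
const messages = convertToGroqChatMessages([
  { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
]);

console.log(messages); // [{ role: 'user', content: 'Hello' }]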