@adaline/groq 0.16.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +128 -1
- package/dist/index.d.ts +128 -1
- package/dist/index.js +42 -42
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +6 -6
- package/dist/index.mjs.map +1 -1
- package/package.json +4 -4
package/dist/index.d.mts
CHANGED
@@ -1,7 +1,7 @@
 import { ChatModelSchemaType, ParamsType, ProviderV1, ChatModelV1, EmbeddingModelSchemaType, EmbeddingModelV1 } from '@adaline/provider';
 import { z } from 'zod';
 import { BaseChatModel } from '@adaline/openai';
-import { MessageType } from '@adaline/types';
+import { MessageType, ChatModelPriceType } from '@adaline/types';
 
 declare const BaseChatModelOptions: z.ZodObject<{
     modelName: z.ZodString;
@@ -21,6 +21,7 @@ declare class BaseChatModelGroq extends BaseChatModel {
     private readonly groqApiKey;
     constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType);
     transformMessages(messages: MessageType[]): ParamsType;
+    getModelPricing(): ChatModelPriceType;
 }
 
 declare const Gemma2_9b_ITLiteral: "gemma2-9b-it";
@@ -69,6 +70,20 @@ declare const Gemma2_9b_ITSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Gemma2_9b_ITOptions: z.ZodObject<{
@@ -132,6 +147,20 @@ declare const Llama_3_1_8bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_1_8b_Options: z.ZodObject<{
@@ -195,6 +224,20 @@ declare const Llama_3_2_11b_VisionSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_2_11b_VisionOptions: z.ZodObject<{
@@ -258,6 +301,20 @@ declare const Llama_3_2_1bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_2_1b_Options: z.ZodObject<{
@@ -321,6 +378,20 @@ declare const Llama_3_2_3bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_2_3b_Options: z.ZodObject<{
@@ -384,6 +455,20 @@ declare const Llama_3_2_90b_VisionSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_2_90b_VisionOptions: z.ZodObject<{
@@ -447,6 +532,20 @@ declare const Llama_3_70bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_70bOptions: z.ZodObject<{
@@ -510,6 +609,20 @@ declare const Llama_3_8bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const Llama_3_8bOptions: z.ZodObject<{
@@ -573,6 +686,20 @@ declare const LlamaGuard_3_8bSchema: {
     }>;
     schema: z.ZodObject<z.ZodRawShape, z.UnknownKeysParam, z.ZodTypeAny, unknown, unknown>;
     };
+    price: {
+        modelName: string;
+        currency: string;
+        tokenRanges: {
+            minTokens: number;
+            prices: {
+                base: {
+                    inputPricePerMillion: number;
+                    outputPricePerMillion: number;
+                };
+            };
+            maxTokens?: number | null | undefined;
+        }[];
+    };
     maxReasoningTokens?: number | undefined;
 };
 declare const LlamaGuard_3_8bOptions: z.ZodObject<{
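
Note on the new API surface: the declarations above add getModelPricing(): ChatModelPriceType to BaseChatModelGroq, so every exported Groq chat model class now carries pricing metadata. The following is a minimal, hypothetical TypeScript sketch of how that method could be exercised, based only on the declarations in this diff and the exports in dist/index.js below; the option values and the GROQ_API_KEY lookup are placeholders, not part of the package.

import { Gemma2_9b_IT } from "@adaline/groq";

// Options follow the BaseChatModelOptions schema above: modelName and apiKey are required, non-empty strings.
const model = new Gemma2_9b_IT({
  modelName: "gemma2-9b-it",
  apiKey: process.env.GROQ_API_KEY ?? "",
});

// New in 1.0.0: per-model pricing metadata (values come from the pricing map bundled in dist/index.js below).
const pricing = model.getModelPricing();
console.log(pricing.currency);                                        // "USD"
console.log(pricing.tokenRanges[0].prices.base.inputPricePerMillion); // 0.2 for gemma2-9b-it
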
package/dist/index.d.ts
CHANGED
The changes to this file are identical to the changes shown above for package/dist/index.d.mts (same hunks, @@ -1,7 +1,7 @@ through @@ -573,6 +686,20 @@: the ChatModelPriceType import, the getModelPricing() declaration on BaseChatModelGroq, and the price block added to each model schema).
package/dist/index.js
CHANGED
@@ -4,49 +4,49 @@ var provider = require('@adaline/provider');
 var zod = require('zod');
 var openai = require('@adaline/openai');
 
-var
-`)):
-`));}),
-${this.chatModelLiterals().join(", ")}`)});let
+var a={"gemma2-9b-it":{modelName:"gemma2-9b-it",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.2,outputPricePerMillion:.2}}}]},"llama-3.1-8b-instant":{modelName:"llama-3.1-8b-instant",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.05,outputPricePerMillion:.08}}}]},"llama-3.2-11b-vision-preview":{modelName:"llama-3.2-11b-vision-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.18,outputPricePerMillion:.18}}}]},"llama-3.2-1b-preview":{modelName:"llama-3.2-1b-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.04,outputPricePerMillion:.04}}}]},"llama-3.2-3b-preview":{modelName:"llama-3.2-3b-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.06,outputPricePerMillion:.06}}}]},"llama-3.2-90b-vision-preview":{modelName:"llama-3.2-90b-vision-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.7,outputPricePerMillion:.8}}}]},"llama3-70b-8192":{modelName:"llama3-70b-8192",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.59,outputPricePerMillion:.79}}}]},"llama3-8b-8192":{modelName:"llama3-8b-8192",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.05,outputPricePerMillion:.08}}}]},"llama-guard-3-8b":{modelName:"llama-guard-3-8b",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.2,outputPricePerMillion:.2}}}]}};var n=zod.z.object({modelName:zod.z.string().min(1),apiKey:zod.z.string().min(1)}),t=class extends openai.BaseChatModel{constructor(i,c){let r=n.parse(c),p=h.baseUrl;super(i,{modelName:r.modelName,apiKey:r.apiKey,baseUrl:p,completeChatUrl:`${p}/chat/completions`,streamChatUrl:`${p}/chat/completions`});this.version="v1";this.modelSchema=i,this.modelName=r.modelName,this.groqApiKey=r.apiKey;}transformMessages(i){let c=i.some(l=>l.role==="system"),r=i.some(l=>l.content.some(_=>_.modality==="image"));if(c&&r)throw new provider.InvalidMessagesError({info:`Invalid message content for model : '${this.modelName}'`,cause:new Error("Prompting with images is incompatible with system messages`)")});let p=super.transformMessages(i);return p.messages.forEach(l=>{l.role==="system"?typeof l.content!="string"&&(l.content=l.content.map(_=>_.text).join(`
+`)):l.role==="assistant"&&l.content&&typeof l.content!="string"&&(l.content=l.content.map(_=>_.text).join(`
+`));}),p}getModelPricing(){if(!(this.modelName in a))throw new provider.ModelResponseError({info:`Invalid model pricing for model : '${this.modelName}'`,cause:new Error(`No pricing configuration found for model "${this.modelName}"`)});return a[this.modelName]}};var v=provider.RangeConfigItem({param:"temperature",title:provider.CHAT_CONFIG.TEMPERATURE.title,description:provider.CHAT_CONFIG.TEMPERATURE.description,min:0,max:2,step:.01,default:1}),N=s=>provider.RangeConfigItem({param:"max_tokens",title:provider.CHAT_CONFIG.MAX_TOKENS.title,description:provider.CHAT_CONFIG.MAX_TOKENS.description,min:0,max:s,step:1,default:0}),q=provider.MultiStringConfigItem({param:"stop",title:provider.CHAT_CONFIG.STOP(4).title,description:provider.CHAT_CONFIG.STOP(4).description,max:4}),w=provider.RangeConfigItem({param:"top_p",title:provider.CHAT_CONFIG.TOP_P.title,description:provider.CHAT_CONFIG.TOP_P.description,min:0,max:1,step:.01,default:1}),z=provider.RangeConfigItem({param:"frequency_penalty",title:provider.CHAT_CONFIG.FREQUENCY_PENALTY.title,description:provider.CHAT_CONFIG.FREQUENCY_PENALTY.description,min:-2,max:2,step:.01,default:0}),B=provider.RangeConfigItem({param:"presence_penalty",title:provider.CHAT_CONFIG.PRESENCE_PENALTY.title,description:provider.CHAT_CONFIG.PRESENCE_PENALTY.description,min:-2,max:2,step:.01,default:0}),D=provider.RangeConfigItem({param:"seed",title:provider.CHAT_CONFIG.SEED.title,description:provider.CHAT_CONFIG.SEED.description,min:0,max:1e6,step:1,default:0}),V=provider.SelectStringConfigItem({param:"response_format",title:provider.CHAT_CONFIG.RESPONSE_FORMAT.title,description:provider.CHAT_CONFIG.RESPONSE_FORMAT.description,default:"text",choices:["text","json_object"]}),F=provider.SelectStringConfigItem({param:"tool_choice",title:"Tool choice",description:"Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",default:"auto",choices:["auto","required","none"]});var W=s=>zod.z.object({temperature:v.schema,maxTokens:N(s).schema,stop:q.schema,topP:w.schema,frequencyPenalty:z.schema,presencePenalty:B.schema,seed:D.schema.transform(e=>e===0?void 0:e),responseFormat:V.schema,toolChoice:F.schema}),ee=s=>({temperature:v.def,maxTokens:N(s).def,stop:q.def,topP:w.def,frequencyPenalty:z.def,presencePenalty:B.def,seed:D.def,responseFormat:V.def,toolChoice:F.def});var o={base:s=>({def:ee(s),schema:W(s)})};var u="gemma2-9b-it",ge="Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.",U=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:u,description:ge,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[u]}),oe=n,M=class extends t{constructor(e){super(U,e);}};var f="llama-3.1-8b-instant",Ie="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",j=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:f,description:Ie,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[f]}),ae=n,b=class extends t{constructor(e){super(j,e);}};var y="llama-3.2-11b-vision-preview",ve="The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image. The models outperform many of the available open source and closed multimodal models on common industry benchmarks.",K=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelModalitiesEnum).parse({name:y,description:ve,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[y]}),te=n,T=class extends t{constructor(e){super(K,e);}};var O="llama-3.2-1b-preview",De="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",$=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:O,description:De,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[O]}),se=n,g=class extends t{constructor(e){super($,e);}};var C="llama-3.2-3b-preview",$e="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",Y=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:C,description:$e,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[C]}),ie=n,L=class extends t{constructor(e){super(Y,e);}};var x="llama-3.2-90b-vision-preview",Je="The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, complex image reasoning, detailed captioning, and answering intricate questions about images. These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.",Q=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelModalitiesEnum).parse({name:x,description:Je,maxInputTokens:131072,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[x]}),ne=n,S=class extends t{constructor(e){super(Q,e);}};var P="llama3-70b-8192",so="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",X=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:P,description:so,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[P]}),me=n,I=class extends t{constructor(e){super(X,e);}};var k="llama3-8b-8192",po="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",Z=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:k,description:po,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[k]}),le=n,E=class extends t{constructor(e){super(Z,e);}};var R="llama-guard-3-8b",fo="Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",H=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:R,description:fo,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[R]}),re=n,A=class extends t{constructor(e){super(H,e);}};var bo="groq",h=class{constructor(){this.version="v1";this.name=bo;this.chatModelFactories={[u]:{model:M,modelOptions:oe,modelSchema:U},[R]:{model:A,modelOptions:re,modelSchema:H},[k]:{model:E,modelOptions:le,modelSchema:Z},[P]:{model:I,modelOptions:me,modelSchema:X},[f]:{model:b,modelOptions:ae,modelSchema:j},[y]:{model:T,modelOptions:te,modelSchema:K},[x]:{model:S,modelOptions:ne,modelSchema:Q},[C]:{model:L,modelOptions:ie,modelSchema:Y},[O]:{model:g,modelOptions:se,modelSchema:$}};this.embeddingModelFactories={};}chatModelLiterals(){return Object.keys(this.chatModelFactories)}chatModelSchemas(){return Object.keys(this.chatModelFactories).reduce((e,i)=>(e[i]=this.chatModelFactories[i].modelSchema,e),{})}chatModel(e){let i=e.modelName;if(!(i in this.chatModelFactories))throw new provider.ProviderError({info:`Groq chat model: ${i} not found`,cause:new Error(`Groq chat model: ${i} not found, available chat models:
+${this.chatModelLiterals().join(", ")}`)});let c=this.chatModelFactories[i].model,r=this.chatModelFactories[i].modelOptions.parse(e);return new c(r)}embeddingModelLiterals(){return Object.keys(this.embeddingModelFactories)}embeddingModelSchemas(){return Object.keys(this.embeddingModelFactories).reduce((e,i)=>(e[i]=this.embeddingModelFactories[i].modelSchema,e),{})}embeddingModel(e){throw new provider.ProviderError({info:"Groq does not support embedding models yet",cause:new Error("Groq does not support embedding models yet")})}};h.baseUrl="https://api.groq.com/openai/v1";
 
-exports.BaseChatModelGroq =
-exports.BaseChatModelOptions =
-exports.Gemma2_9b_IT =
-exports.Gemma2_9b_ITLiteral =
-exports.Gemma2_9b_ITOptions =
-exports.Gemma2_9b_ITSchema =
-exports.Groq =
-exports.LlamaGuard_3_8b =
-exports.LlamaGuard_3_8bLiteral =
-exports.LlamaGuard_3_8bOptions =
-exports.LlamaGuard_3_8bSchema =
-exports.Llama_3_1_8b =
-exports.Llama_3_1_8bLiteral =
-exports.Llama_3_1_8bSchema =
-exports.Llama_3_1_8b_Options =
-exports.Llama_3_2_11b_Vision =
-exports.Llama_3_2_11b_VisionLiteral =
-exports.Llama_3_2_11b_VisionOptions =
-exports.Llama_3_2_11b_VisionSchema =
-exports.Llama_3_2_1b =
-exports.Llama_3_2_1bLiteral =
-exports.Llama_3_2_1bSchema =
-exports.Llama_3_2_1b_Options =
-exports.Llama_3_2_3b =
-exports.Llama_3_2_3bLiteral =
-exports.Llama_3_2_3bSchema =
-exports.Llama_3_2_3b_Options =
-exports.Llama_3_2_90b_Vision =
-exports.Llama_3_2_90b_VisionLiteral =
+exports.BaseChatModelGroq = t;
+exports.BaseChatModelOptions = n;
+exports.Gemma2_9b_IT = M;
+exports.Gemma2_9b_ITLiteral = u;
+exports.Gemma2_9b_ITOptions = oe;
+exports.Gemma2_9b_ITSchema = U;
+exports.Groq = h;
+exports.LlamaGuard_3_8b = A;
+exports.LlamaGuard_3_8bLiteral = R;
+exports.LlamaGuard_3_8bOptions = re;
+exports.LlamaGuard_3_8bSchema = H;
+exports.Llama_3_1_8b = b;
+exports.Llama_3_1_8bLiteral = f;
+exports.Llama_3_1_8bSchema = j;
+exports.Llama_3_1_8b_Options = ae;
+exports.Llama_3_2_11b_Vision = T;
+exports.Llama_3_2_11b_VisionLiteral = y;
+exports.Llama_3_2_11b_VisionOptions = te;
+exports.Llama_3_2_11b_VisionSchema = K;
+exports.Llama_3_2_1b = g;
+exports.Llama_3_2_1bLiteral = O;
+exports.Llama_3_2_1bSchema = $;
+exports.Llama_3_2_1b_Options = se;
+exports.Llama_3_2_3b = L;
+exports.Llama_3_2_3bLiteral = C;
+exports.Llama_3_2_3bSchema = Y;
+exports.Llama_3_2_3b_Options = ie;
+exports.Llama_3_2_90b_Vision = S;
+exports.Llama_3_2_90b_VisionLiteral = x;
 exports.Llama_3_2_90b_VisionOptions = ne;
-exports.Llama_3_2_90b_VisionSchema =
-exports.Llama_3_70b =
-exports.Llama_3_70bLiteral =
-exports.Llama_3_70bOptions =
-exports.Llama_3_70bSchema =
-exports.Llama_3_8b =
-exports.Llama_3_8bLiteral =
-exports.Llama_3_8bOptions =
-exports.Llama_3_8bSchema =
+exports.Llama_3_2_90b_VisionSchema = Q;
+exports.Llama_3_70b = I;
+exports.Llama_3_70bLiteral = P;
+exports.Llama_3_70bOptions = me;
+exports.Llama_3_70bSchema = X;
+exports.Llama_3_8b = E;
+exports.Llama_3_8bLiteral = k;
+exports.Llama_3_8bOptions = le;
+exports.Llama_3_8bSchema = Z;
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map
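
For readability: the pricing map inlined at the top of the rewritten bundle above (the minified variable a) de-minifies to entries of the shape sketched below. The gemma2-9b-it entry is reproduced verbatim; the remaining per-million-token USD prices are copied from the same map. Annotating it with ChatModelPriceType assumes that type is importable from @adaline/types, as the declaration files above do.

import { ChatModelPriceType } from "@adaline/types";

// De-minified shape of one entry in the bundled pricing map (values copied from the bundle above).
const gemma2_9b_itPrice: ChatModelPriceType = {
  modelName: "gemma2-9b-it",
  currency: "USD",
  tokenRanges: [
    { minTokens: 0, maxTokens: null, prices: { base: { inputPricePerMillion: 0.2, outputPricePerMillion: 0.2 } } },
  ],
};

// Remaining entries, USD per million tokens (input / output):
//   llama-3.1-8b-instant           0.05 / 0.08
//   llama-3.2-11b-vision-preview   0.18 / 0.18
//   llama-3.2-1b-preview           0.04 / 0.04
//   llama-3.2-3b-preview           0.06 / 0.06
//   llama-3.2-90b-vision-preview   0.70 / 0.80
//   llama3-70b-8192                0.59 / 0.79
//   llama3-8b-8192                 0.05 / 0.08
//   llama-guard-3-8b               0.20 / 0.20
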
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
{"version":3,"sources":["../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/gemma2-9b-it.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-90b-vision.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","hasSystemRole","msg","hasImageModality","content","InvalidMessagesError","transformedMessages","message","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Gemma2_9b_ITLiteral","Gemma2_9b_ITDescription","Gemma2_9b_ITSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Gemma2_9b_ITOptions","Gemma2_9b_IT","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_90b_VisionLiteral","Llama_3_2_90b_VisionDescription","Llama_3_2_90b_VisionSchema","Llama_3_2_90b_VisionOptions","Llama_3_2_90b_Vision","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","ProviderLiteral","acc","key","modelName","ProviderError","model"],"mappings":";;;;;;IAQMA,CAAuBC,CAAAA,KAAAA,CAAE,MAAO,CAAA,CACpC,UAAWA,KAAE,CAAA,MAAA,EAAS,CAAA,GAAA,CAAI,CAAC,CAC3B,CAAA,MAAA,CAAQA,KAAE,CAAA,MAAA,GAAS,GAAI,CAAA,CAAC,CAC1B,CAAC,CAAA,CAGKC,EAAN,cAAgCC,oBAAc,CAO5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,EAAqB,KAAMK,CAAAA,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,EAAK,OACrB,CAAA,KAAA,CAAMJ,CAAa,CAAA,CACjB,UAAWE,CAAc,CAAA,SAAA,CACzB,MAAQA,CAAAA,CAAAA,CAAc,OACtB,OAASC,CAAAA,CAAAA,CACT,eAAiB,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAA,aAAA,CAAe,CAAGA,EAAAA,CAAO,mBAC3B,CAAC,CAAA,CAfH,KAAS,OAAU,CAAA,IAAA,CAgBjB,KAAK,WAAcH,CAAAA,CAAAA,CACnB,IAAK,CAAA,SAAA,CAAYE,EAAc,SAC/B,CAAA,IAAA,CAAK,UAAaA,CAAAA,CAAAA,CAAc,OAClC,CAEA,iBAAA,CAAkBG,CAAqC,CAAA,CACrD,IAAMC,CAAgBD,CAAAA,CAAAA,CAAS,IAAME,CAAAA,CAAAA,EAAQA,EAAI,IAAS,GAAA,QAAQ,CAC5DC,CAAAA,CAAAA,CAAmBH,EAAS,IAAME,CAAAA,CAAAA,EAAQA,CAAI,CAAA,OAAA,CAAQ,KAAME,CAAiBA,EAAAA,CAAAA,CAAQ,QAAa,GAAA,OAAO,CAAC,CAEhH,CAAA,GAAIH,GAAiBE,CACnB,CAAA,MAAM,IAAIE,6BAAqB,CAAA,CAC7B,IAAM,CAAA,CAAA,qC
AAA,EAAwC,KAAK,SAAS,CAAA,CAAA,CAAA,CAC5D,KAAO,CAAA,IAAI,MAAM,8DAA8D,CACjF,CAAC,CAAA,CAGH,IAAMC,CAAsB,CAAA,KAAA,CAAM,kBAAkBN,CAAQ,CAAA,CAI5D,OAAAM,CAAoB,CAAA,QAAA,CAAS,OAASC,CAAAA,CAAAA,EAAY,CAC5CA,CAAQ,CAAA,IAAA,GAAS,QACf,CAAA,OAAOA,EAAQ,OAAY,EAAA,QAAA,GAC7BA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnEG,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEME,CAAAA,CACT,CACF,EC/DA,IAAME,CAAcC,CAAAA,wBAAAA,CAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,WAAA,CAAY,KAC/B,CAAA,WAAA,CAAaA,oBAAY,CAAA,WAAA,CAAY,YACrC,GAAK,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKC,CAAAA,CAAAA,CAAaC,CACjBH,EAAAA,wBAAAA,CAAgB,CACd,KAAA,CAAO,YACP,CAAA,KAAA,CAAOC,qBAAY,UAAW,CAAA,KAAA,CAC9B,WAAaA,CAAAA,oBAAAA,CAAY,UAAW,CAAA,WAAA,CACpC,GAAK,CAAA,CAAA,CACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEGC,EAAOC,8BAAsB,CAAA,CACjC,KAAO,CAAA,MAAA,CACP,KAAOJ,CAAAA,oBAAAA,CAAY,IAAK,CAAA,CAAC,EAAE,KAC3B,CAAA,WAAA,CAAaA,oBAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,CAAA,CAEKK,CAAON,CAAAA,wBAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,KAAA,CAAM,KACzB,CAAA,WAAA,CAAaA,oBAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKM,EAAmBP,wBAAgB,CAAA,CACvC,KAAO,CAAA,mBAAA,CACP,KAAOC,CAAAA,oBAAAA,CAAY,iBAAkB,CAAA,KAAA,CACrC,YAAaA,oBAAY,CAAA,iBAAA,CAAkB,WAC3C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,wBAAgB,CAAA,CACtC,MAAO,kBACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,gBAAA,CAAiB,KACpC,CAAA,WAAA,CAAaA,oBAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKQ,CAAAA,CAAAA,CAAOT,wBAAgB,CAAA,CAC3B,KAAO,CAAA,MAAA,CACP,KAAOC,CAAAA,oBAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,WAAaA,CAAAA,oBAAAA,CAAY,IAAK,CAAA,WAAA,CAC9B,GAAK,CAAA,CAAA,CACL,IAAK,GACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEKS,CAAAA,CAAAA,CAAiBC,gCAAuB,CAC5C,KAAA,CAAO,iBACP,CAAA,KAAA,CAAOV,oBAAY,CAAA,eAAA,CAAgB,KACnC,CAAA,WAAA,CAAaA,qBAAY,eAAgB,CAAA,WAAA,CACzC,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,MAAQ,CAAA,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,+BAAuB,CAAA,CACxC,KAAO,CAAA,aAAA,CACP,MAAO,aACP,CAAA,WAAA,CACE,+LAGF,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,WAAY,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,CAAAA,CAA6BV,CACjCpB,EAAAA,KAAAA,CAAE,OAAO,CACP,WAAA,CAAagB,CAAY,CAAA,MAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,OACtC,IAAMC,CAAAA,CAAAA,CAAK,MACX,CAAA,IAAA,CAAME,CAAK,CAAA,MAAA,CACX,gBAAkBC,CAAAA,CAAAA,CAAiB,OACnC,eAAiBC,CAAAA,CAAAA,CAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,SAAWK,CAAAA,CAAAA,EAAWA,IAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CAAA,CACxE,cAAgBJ,CAAAA,CAAAA,CAAe,MAC/B,CAAA,UAAA,CAAYE,EAAW,MACzB,CAAC,CAEGG,CAAAA,CAAAA,CAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,GACzB,CAAA,SAAA,CAAWG,CAAUC,CAAAA,CAAe,CAAE,CAAA,GAAA,CACtC,IAAMC,CAAAA,CAAAA,CAAK,IACX,IAAME,CAAAA,CAAAA,CAAK,GACX,CAAA,gBAAA,CAAkBC,CAAiB,CAAA,GAAA,CACnC,eAAiBC,CAAAA,CAAAA,CAAgB,IACjC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,cAAA,CAAgBC,CAAe,CAAA,GAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,GACzB,CCpCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,IAAOb,CAAAA,CAAAA,GAA6B,CAClC,GAAA,CAAKY,CAAuBZ,CAAAA,CAAe,CAC3C,CAAA,MAAA,CAAQU,CAA0BV,CAAAA,CAAe,CACnD,CAAA,CACF,ECMMc,IAAAA,CAAAA,CAAsB,cAEtBC,CAAAA,EAAAA,CACJ,6JAGIC,CAAAA,CAAAA,CAAqBC,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAq
C,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,eAAiB,CAAA,IAAA,CACjB,KAAOK,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAAA,CAEKS,EAAsB3C,CAAAA,CAAAA,CAGtB4C,EAAN,cAA2B1C,CAAkB,CAC3C,WAAA,CAAYG,CAAkC,CAAA,CAC5C,KAAMgC,CAAAA,CAAAA,CAAoBhC,CAAO,EACnC,CACF,MC1BMwC,CAAsB,CAAA,sBAAA,CAEtBC,EACJ,CAAA,6NAAA,CAGIC,CAAqBT,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOL,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKc,CAAAA,EAAAA,CAAuBhD,CAGvBiD,CAAAA,CAAAA,CAAN,cAA2B/C,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM0C,CAAoB1C,CAAAA,CAAO,EACnC,CACF,MC/BM6C,CAA8B,CAAA,8BAAA,CAE9BC,EACJ,CAAA,0RAAA,CAIIC,CAA6Bd,CAAAA,wBAAAA,CAAgBC,2BAAsBc,CAAAA,oCAA6B,EAAE,KAAM,CAAA,CAC5G,IAAMH,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOV,8BACP,CAAA,UAAA,CAAYa,gCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKqB,CAAAA,EAAAA,CAA8BvD,CAG9BwD,CAAAA,CAAAA,CAAN,cAAmCtD,CAAkB,CACnD,WAAYG,CAAAA,CAAAA,CAA0C,CACpD,KAAA,CAAM+C,CAA4B/C,CAAAA,CAAO,EAC3C,CACF,ECtBA,IAAMoD,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqBrB,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMiB,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOjB,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEK0B,EAAuB5D,CAAAA,CAAAA,CAGvB6D,CAAN,CAAA,cAA2B3D,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,KAAMsD,CAAAA,CAAAA,CAAoBtD,CAAO,EACnC,CACF,EC1BA,IAAMyD,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqB1B,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMsB,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOtB,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEK+B,EAAuBjE,CAAAA,CAAAA,CAGvBkE,CAAN,CAAA,cAA2BhE,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,KAAM2D,CAAAA,CAAAA,CAAoB3D,CAAO,EACnC,CACF,EC/BM8D,IAAAA,CAAAA,CAA8B,+BAE9BC,EACJ,CAAA,wSAAA,CAIIC,CAA6B/B,CAAAA,wBAAAA,CAAgBC,2BAAsBc,CAAAA,oCAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAMc,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,MAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAO3B,8BACP,CAAA,UAAA,CAAYa,gCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKoC,CAAAA,EAAAA,CAA8BtE,CAG9BuE,CAAAA,CAAAA,CAAN,cAAmCrE,CAAkB,CACnD,WAAYG,CAAAA,CAAAA,CAA0C,CACpD,KAAA,CAAMgE,CAA4BhE,CAAAA,CAAO,EAC3C,CACF,ECtBMmE,IAAAA,CAAAA,CAAqB,kBAErBC,EACJ,CAAA,yKAAA,CAGIC,CAAoBpC,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMgC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOhC,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAAqB3E,CAGrB4E,CAAAA,CAAAA,CAAN,cA
A0B1E,CAAkB,CAC1C,WAAYG,CAAAA,CAAAA,CAAiC,CAC3C,KAAA,CAAMqE,CAAmBrE,CAAAA,CAAO,EAClC,CACF,EC1BA,IAAMwE,CAAoB,CAAA,gBAAA,CAEpBC,GACJ,yKAGIC,CAAAA,CAAAA,CAAmBzC,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOrC,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEK8C,EAAoBhF,CAAAA,CAAAA,CAGpBiF,CAAN,CAAA,cAAyB/E,CAAkB,CACzC,YAAYG,CAAgC,CAAA,CAC1C,KAAM0E,CAAAA,CAAAA,CAAkB1E,CAAO,EACjC,CACF,EC1BA,IAAM6E,CAAyB,CAAA,kBAAA,CAEzBC,GAA6B,iGAE7BC,CAAAA,CAAAA,CAAwB9C,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC/G,KAAM0C,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKmD,EAAyBrF,CAAAA,CAAAA,CAGzBsF,CAAN,CAAA,cAA8BpF,CAAkB,CAC9C,YAAYG,CAAqC,CAAA,CAC/C,KAAM+E,CAAAA,CAAAA,CAAuB/E,CAAO,EACtC,CACF,MC/BMkF,EAAkB,CAAA,MAAA,CAClB/E,CAAN,CAAA,KAAuI,CAAvI,WAAA,EAAA,CACE,IAAS,CAAA,OAAA,CAAU,KACnB,IAAS,CAAA,IAAA,CAAO+E,EAGhB,CAAA,IAAA,CAAiB,kBAOb,CAAA,CACF,CAAQpD,CAAmB,EAAG,CAC5B,KAAA,CAAcS,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBN,CACtB,CAAA,CACA,CAAQ6C,CAAsB,EAAG,CAC/B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAiB,EAAG,CAC1B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAkB,EAAG,CAC3B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQ7B,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQG,CAA2B,EAAG,CACpC,KAAA,CAAcM,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBH,CACtB,CAAA,CACA,CAAQe,CAA2B,EAAG,CACpC,KAAcI,CAAAA,CAAAA,CACd,aAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACF,CAEA,CAAA,IAAA,CAAiB,uBAOb,CAAA,GAEJ,CAAA,iBAAA,EAA8B,CAC5B,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAC5C,CAEA,gBAAA,EAAwD,CACtD,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAA,CAAE,MAC1C,CAAA,CAAC6B,CAAKC,CAAAA,CAAAA,IACJD,EAAIC,CAAG,CAAA,CAAI,IAAK,CAAA,kBAAA,CAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CAET,EACF,CACF,CAEA,SAAA,CAAUnF,CAAyB,CAAA,CACjC,IAAMqF,CAAAA,CAAYrF,EAAQ,SAC1B,CAAA,GAAI,EAAEqF,CAAAA,IAAa,IAAK,CAAA,kBAAA,CAAA,CACtB,MAAM,IAAIC,uBAAc,CACtB,IAAA,CAAM,CAAoBD,iBAAAA,EAAAA,CAAS,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAS,CAAA;AAAA,UAAA,EAC1C,KAAK,iBAAkB,EAAA,CAAE,KAAK,IAAI,CAAC,EAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,EAAQ,IAAK,CAAA,kBAAA,CAAmBF,CAAS,CAAE,CAAA,KAAA,CAC3CpF,EAAgB,IAAK,CAAA,kBAAA,CAAmBoF,CAAS,CAAA,CAAE,aAAa,KAAMrF,CAAAA,CAAO,EACnF,OAAO,IAAIuF,EAAMtF,CAAa,CAChC,CAEA,sBAAA,EAAmC,CACjC,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,uBAAuB,CACjD,CAEA,qBAAA,EAAkE,CAChE,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,uBAAuB,CAAE,CAAA,MAAA,CAC/C,CAACkF,CAAKC,CAAAA,CAAAA,IACJD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GAET,EACF,CACF,CAGA,eAAenF,CAA8B,CAAA,CAC3C,MAAM,IAAIsF,sBAAAA,CAAc,CACtB,IAAM,CAAA,4CAAA,CACN,MAAO,IAAI,KAAA,CAAM,4CAA4C,CAC/D,CAAC,CACH,CACF,EAvHMnF,EAGY,OAAU,CAAA,gCAAA","file":"index.js","sourcesContent":["import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, 
InvalidMessagesError, ParamsType } from \"@adaline/provider\";\nimport { MessageType } from \"@adaline/types\";\n\nimport { Groq } from \"../../provider\";\n\nconst BaseChatModelOptions = z.object({\n modelName: z.string().min(1),\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n readonly modelName: string;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n modelName: parsedOptions.modelName,\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.modelName = parsedOptions.modelName;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const hasSystemRole = messages.some((msg) => msg.role === \"system\");\n const hasImageModality = messages.some((msg) => msg.content.some((content: any) => content.modality === \"image\"));\n\n if (hasSystemRole && hasImageModality) {\n throw new InvalidMessagesError({\n info: `Invalid message content for model : '${this.modelName}'`,\n cause: new Error(\"Prompting with images is incompatible with system messages`)\"),\n });\n }\n\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality\n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n}\n\nexport { BaseChatModelGroq, BaseChatModelOptions, type BaseChatModelOptionsType };\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n});\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst 
presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { frequencyPenalty, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topP, responseFormat };\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n responseFormat,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import { ChatModelBaseConfigDef, ChatModelBaseConfigSchema } from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Gemma2_9b_ITLiteral = \"gemma2-9b-it\" as const;\n// https://huggingface.co/google/gemma-2-9b-it\nconst Gemma2_9b_ITDescription =\n \"Gemma is a family of lightweight, state-of-the-art open models from Google, \\\n built from the same research and technology used to create the Gemini models.\";\n\nconst Gemma2_9b_ITSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Gemma2_9b_ITLiteral,\n description: Gemma2_9b_ITDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n 
modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Gemma2_9b_ITOptions = BaseChatModelOptions;\ntype Gemma2_9b_ITOptionsType = z.infer<typeof Gemma2_9b_ITOptions>;\n\nclass Gemma2_9b_IT extends BaseChatModelGroq {\n constructor(options: Gemma2_9b_ITOptionsType) {\n super(Gemma2_9b_ITSchema, options);\n }\n}\n\nexport { Gemma2_9b_IT, Gemma2_9b_ITOptions, Gemma2_9b_ITSchema, Gemma2_9b_ITLiteral, type Gemma2_9b_ITOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription =\n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport { Llama_3_1_8b, Llama_3_1_8b_Options, Llama_3_1_8bSchema, Llama_3_1_8bLiteral, type Llama_3_1_8b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription =\n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. 
\\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport { Llama_3_2_1b, Llama_3_2_1b_Options, Llama_3_2_1bSchema, Llama_3_2_1bLiteral, type Llama_3_2_1b_OptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport { Llama_3_2_3b, Llama_3_2_3b_Options, Llama_3_2_3bSchema, Llama_3_2_3bLiteral, type Llama_3_2_3b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_90b_VisionLiteral = \"llama-3.2-90b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-90B-Vision\nconst Llama_3_2_90b_VisionDescription =\n \"The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, \\\n complex image reasoning, detailed captioning, and answering intricate questions about images. \\\n These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.\";\n\nconst Llama_3_2_90b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_90b_VisionLiteral,\n description: Llama_3_2_90b_VisionDescription,\n maxInputTokens: 131072,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_90b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_90b_VisionOptionsType = z.infer<typeof Llama_3_2_90b_VisionOptions>;\n\nclass Llama_3_2_90b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_90b_VisionOptionsType) {\n super(Llama_3_2_90b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_90b_Vision,\n Llama_3_2_90b_VisionLiteral,\n Llama_3_2_90b_VisionOptions,\n Llama_3_2_90b_VisionSchema,\n type Llama_3_2_90b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, 
OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport { Llama_3_70b, Llama_3_70bOptions, Llama_3_70bSchema, Llama_3_70bLiteral, type Llama_3_70bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport { Llama_3_8b, Llama_3_8bOptions, Llama_3_8bSchema, Llama_3_8bLiteral, type Llama_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n 
constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport { LlamaGuard_3_8b, LlamaGuard_3_8bOptions, LlamaGuard_3_8bSchema, LlamaGuard_3_8bLiteral, type LlamaGuard_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport { ChatModelSchemaType, ChatModelV1, EmbeddingModelSchemaType, EmbeddingModelV1, ProviderError, ProviderV1 } from \"@adaline/provider\";\n\nimport * as Models from \"../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<C extends Models.BaseChatModelOptionsType, E extends Record<string, any> = Record<string, any>> implements ProviderV1<C, E> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: Record<\n string,\n {\n model: { new (options: any): ChatModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: ChatModelSchemaType;\n }\n > = {\n [Models.Gemma2_9b_ITLiteral]: {\n model: Models.Gemma2_9b_IT,\n modelOptions: Models.Gemma2_9b_ITOptions,\n modelSchema: Models.Gemma2_9b_ITSchema,\n },\n [Models.LlamaGuard_3_8bLiteral]: {\n model: Models.LlamaGuard_3_8b,\n modelOptions: Models.LlamaGuard_3_8bOptions,\n modelSchema: Models.LlamaGuard_3_8bSchema,\n },\n [Models.Llama_3_8bLiteral]: {\n model: Models.Llama_3_8b,\n modelOptions: Models.Llama_3_8bOptions,\n modelSchema: Models.Llama_3_8bSchema,\n },\n [Models.Llama_3_70bLiteral]: {\n model: Models.Llama_3_70b,\n modelOptions: Models.Llama_3_70bOptions,\n modelSchema: Models.Llama_3_70bSchema,\n },\n [Models.Llama_3_1_8bLiteral]: {\n model: Models.Llama_3_1_8b,\n modelOptions: Models.Llama_3_1_8b_Options,\n modelSchema: Models.Llama_3_1_8bSchema,\n },\n [Models.Llama_3_2_11b_VisionLiteral]: {\n model: Models.Llama_3_2_11b_Vision,\n modelOptions: Models.Llama_3_2_11b_VisionOptions,\n modelSchema: Models.Llama_3_2_11b_VisionSchema,\n },\n [Models.Llama_3_2_90b_VisionLiteral]: {\n model: Models.Llama_3_2_90b_Vision,\n modelOptions: Models.Llama_3_2_90b_VisionOptions,\n modelSchema: Models.Llama_3_2_90b_VisionSchema,\n },\n [Models.Llama_3_2_3bLiteral]: {\n model: Models.Llama_3_2_3b,\n modelOptions: Models.Llama_3_2_3b_Options,\n modelSchema: Models.Llama_3_2_3bSchema,\n },\n [Models.Llama_3_2_1bLiteral]: {\n model: Models.Llama_3_2_1b,\n modelOptions: Models.Llama_3_2_1b_Options,\n modelSchema: Models.Llama_3_2_1bSchema,\n },\n };\n\n private readonly embeddingModelFactories: Record<\n string,\n {\n model: { new (options: any): EmbeddingModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: EmbeddingModelSchemaType;\n }\n > = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, ChatModelSchemaType>\n );\n }\n\n chatModel(options: C): ChatModelV1 {\n const modelName = options.modelName;\n if (!(modelName in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${modelName} not found`,\n cause: new Error(`Groq chat model: ${modelName} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[modelName].model;\n const parsedOptions = this.chatModelFactories[modelName].modelOptions.parse(options);\n return new model(parsedOptions);\n }\n\n embeddingModelLiterals(): string[] {\n 
return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, EmbeddingModelSchemaType>\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n embeddingModel(options: E): EmbeddingModelV1 {\n throw new ProviderError({\n info: \"Groq does not support embedding models yet\",\n cause: new Error(\"Groq does not support embedding models yet\"),\n });\n }\n}\n\nexport { Groq };\n"]}
|
|
1
|
+
{"version":3,"sources":["../src/models/pricing.json","../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/gemma2-9b-it.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-90b-vision.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["pricing_default","BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","hasSystemRole","msg","hasImageModality","content","InvalidMessagesError","transformedMessages","message","ModelResponseError","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Gemma2_9b_ITLiteral","Gemma2_9b_ITDescription","Gemma2_9b_ITSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Gemma2_9b_ITOptions","Gemma2_9b_IT","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_90b_VisionLiteral","Llama_3_2_90b_VisionDescription","Llama_3_2_90b_VisionSchema","Llama_3_2_90b_VisionOptions","Llama_3_2_90b_Vision","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","ProviderLiteral","acc","key","modelName","ProviderError","model"],"mappings":";;;;;;AAAA,IAAAA,CAAAA,CAAA,CACE,cAAA,CAAgB,CACd,SAAA,CAAa,eACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CACb,CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,GACxB,qBAAyB,CAAA,EAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,sBAAwB,CAAA,CACtB,UAAa,sBACb,CAAA,QAAA,CAAY,MACZ,WAAe,CAAA,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,8BAAA,CAAgC,CAC9B,SAAa,CAAA,8BAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,CACb,CAAA,SAAA,CAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,sBAAA,CAAwB,CACtB,SAAa,CAAA,sBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,EACb,SAAa,CAAA,IAAA,CACb,OAAU,CACR,IAAA,CAAQ,CACN,oBAAwB,CAAA,GAAA,CACxB,sBAAyB,GAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,uBAAwB,CACtB,SAAA,CAAa,uBACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CAC
b,CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,IACxB,qBAAyB,CAAA,GAC3B,CACF,CACF,CACF,CACF,EACA,8BAAgC,CAAA,CAC9B,UAAa,8BACb,CAAA,QAAA,CAAY,MACZ,WAAe,CAAA,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,EACxB,CAAA,qBAAA,CAAyB,EAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,iBAAA,CAAmB,CACjB,SAAa,CAAA,iBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,gBAAA,CAAkB,CAChB,SAAa,CAAA,gBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,EACb,SAAa,CAAA,IAAA,CACb,OAAU,CACR,IAAA,CAAQ,CACN,oBAAwB,CAAA,GAAA,CACxB,qBAAyB,CAAA,GAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,mBAAoB,CAClB,SAAA,CAAa,mBACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CACb,CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,EACxB,CAAA,qBAAA,CAAyB,EAC3B,CACF,CACF,CACF,CACF,CACF,CCxIA,CAAA,IAAMC,EAAuBC,KAAE,CAAA,MAAA,CAAO,CACpC,SAAWA,CAAAA,KAAAA,CAAE,QAAS,CAAA,GAAA,CAAI,CAAC,CAC3B,CAAA,MAAA,CAAQA,MAAE,MAAO,EAAA,CAAE,GAAI,CAAA,CAAC,CAC1B,CAAC,EAGKC,CAAN,CAAA,cAAgCC,oBAAc,CAO5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,CAAqB,CAAA,KAAA,CAAMK,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,EAAK,OACrB,CAAA,KAAA,CAAMJ,EAAa,CACjB,SAAA,CAAWE,CAAc,CAAA,SAAA,CACzB,MAAQA,CAAAA,CAAAA,CAAc,OACtB,OAASC,CAAAA,CAAAA,CACT,gBAAiB,CAAGA,EAAAA,CAAO,oBAC3B,aAAe,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAC,EAfH,IAAS,CAAA,OAAA,CAAU,KAgBjB,IAAK,CAAA,WAAA,CAAcH,EACnB,IAAK,CAAA,SAAA,CAAYE,CAAc,CAAA,SAAA,CAC/B,IAAK,CAAA,UAAA,CAAaA,EAAc,OAClC,CAEA,kBAAkBG,CAAqC,CAAA,CACrD,IAAMC,CAAgBD,CAAAA,CAAAA,CAAS,IAAME,CAAAA,CAAAA,EAAQA,CAAI,CAAA,IAAA,GAAS,QAAQ,CAC5DC,CAAAA,CAAAA,CAAmBH,EAAS,IAAME,CAAAA,CAAAA,EAAQA,EAAI,OAAQ,CAAA,IAAA,CAAME,CAAiBA,EAAAA,CAAAA,CAAQ,QAAa,GAAA,OAAO,CAAC,CAEhH,CAAA,GAAIH,GAAiBE,CACnB,CAAA,MAAM,IAAIE,6BAAqB,CAAA,CAC7B,KAAM,CAAwC,qCAAA,EAAA,IAAA,CAAK,SAAS,CAC5D,CAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAM,CAAA,8DAA8D,CACjF,CAAC,CAAA,CAGH,IAAMC,CAAAA,CAAsB,KAAM,CAAA,iBAAA,CAAkBN,CAAQ,CAI5D,CAAA,OAAAM,EAAoB,QAAS,CAAA,OAAA,CAASC,GAAY,CAC5CA,CAAAA,CAAQ,IAAS,GAAA,QAAA,CACf,OAAOA,CAAAA,CAAQ,SAAY,QAC7BA,GAAAA,CAAAA,CAAQ,QAAUA,CAAQ,CAAA,OAAA,CAAQ,IAAKH,CAAYA,EAAAA,CAAAA,CAAQ,IAAI,CAAA,CAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnEG,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEME,CAAAA,CACT,CACA,eAAsC,EAAA,CAEpC,GAAI,EAAE,IAAK,CAAA,SAAA,IAAahB,CACtB,CAAA,CAAA,MAAM,IAAIkB,2BAAmB,CAAA,CAC3B,IAAM,CAAA,CAAA,mCAAA,EAAsC,IAAK,CAAA,SAAS,CAC1D,CAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAM,CAAA,CAAA,0CAAA,EAA6C,IAAK,CAAA,SAAS,GAAG,CACjF,CAAC,CAIH,CAAA,OADclB,EAAY,IAAK,CAAA,SAAqC,CAEtE,CACF,EC5EA,IAAMmB,CAAcC,CAAAA,wBAAAA,CAAgB,CAClC,KAAO,CAAA,aAAA,CACP,KAAOC,CAAAA,oBAAAA,CAAY,WAAY,CAAA,KAAA,CAC/B,WAAaA,CAAAA,oBAAAA,CAAY,YAAY,WACrC,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,CAAA,CAEKC,CAAaC,CAAAA,CAAAA,EACjBH,wBAAgB,CAAA,CACd,KAAO,CAAA,YAAA,CACP,MAAOC,oBAAY,CAAA,UAAA,CAAW,KAC9B,CAAA,WAAA,CAAaA,qBAAY,UAAW,CAAA,WAAA,CACpC,GAAK,CAAA,CAAA,CACL,IAAKE,CACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEGC,CAAAA,CAAAA,CAAOC,+BAAsB,CACjC,KAAA,CAAO,MACP,CAAA,KAAA,CAAOJ,qBAAY,IAAK,CAAA,CAAC,CAAE,CAAA,KAAA,CAC3B,YAAaA,oBAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,EAEKK,CAAON,CAAAA,wBAAAA,CAAgB,CAC3B,KAAA,CAAO,QACP,KAAOC,CAAAA,oBAAAA,CAAY,KAAM,CAAA,KAAA,CACzB,YAAaA,oBAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IAC
N,OAAS,CAAA,CACX,CAAC,CAAA,CAEKM,CAAmBP,CAAAA,wBAAAA,CAAgB,CACvC,KAAA,CAAO,oBACP,KAAOC,CAAAA,oBAAAA,CAAY,iBAAkB,CAAA,KAAA,CACrC,WAAaA,CAAAA,oBAAAA,CAAY,iBAAkB,CAAA,WAAA,CAC3C,IAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKO,EAAkBR,wBAAgB,CAAA,CACtC,KAAO,CAAA,kBAAA,CACP,KAAOC,CAAAA,oBAAAA,CAAY,gBAAiB,CAAA,KAAA,CACpC,YAAaA,oBAAY,CAAA,gBAAA,CAAiB,WAC1C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKQ,CAAOT,CAAAA,wBAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,KAAOC,CAAAA,oBAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,YAAaA,oBAAY,CAAA,IAAA,CAAK,WAC9B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,GAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKS,EAAiBC,+BAAuB,CAAA,CAC5C,KAAO,CAAA,iBAAA,CACP,KAAOV,CAAAA,oBAAAA,CAAY,eAAgB,CAAA,KAAA,CACnC,YAAaA,oBAAY,CAAA,eAAA,CAAgB,WACzC,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,gCAAuB,CACxC,KAAA,CAAO,aACP,CAAA,KAAA,CAAO,cACP,WACE,CAAA,+LAAA,CAGF,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,MAAQ,CAAA,UAAA,CAAY,MAAM,CACtC,CAAC,CCzED,CAAA,IAAME,EAA6BV,CACjCrB,EAAAA,KAAAA,CAAE,MAAO,CAAA,CACP,YAAaiB,CAAY,CAAA,MAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,MACtC,CAAA,IAAA,CAAMC,EAAK,MACX,CAAA,IAAA,CAAME,CAAK,CAAA,MAAA,CACX,iBAAkBC,CAAiB,CAAA,MAAA,CACnC,eAAiBC,CAAAA,CAAAA,CAAgB,OACjC,IAAMC,CAAAA,CAAAA,CAAK,MAAO,CAAA,SAAA,CAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,MACzB,CAAC,EAEGG,EAA0BZ,CAAAA,CAAAA,GAC7B,CACC,WAAA,CAAaJ,CAAY,CAAA,GAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAE,CAAA,GAAA,CACtC,IAAMC,CAAAA,CAAAA,CAAK,IACX,IAAME,CAAAA,CAAAA,CAAK,GACX,CAAA,gBAAA,CAAkBC,EAAiB,GACnC,CAAA,eAAA,CAAiBC,CAAgB,CAAA,GAAA,CACjC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,cAAA,CAAgBC,EAAe,GAC/B,CAAA,UAAA,CAAYE,CAAW,CAAA,GACzB,CCpCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,KAAOb,CAA6B,GAAA,CAClC,GAAKY,CAAAA,EAAAA,CAAuBZ,CAAe,CAAA,CAC3C,MAAQU,CAAAA,CAAAA,CAA0BV,CAAe,CACnD,CAAA,CACF,CCOA,CAAA,IAAMc,EAAsB,cAEtBC,CAAAA,EAAAA,CACJ,6JAGIC,CAAAA,CAAAA,CAAqBC,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,KAAOK,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,EACA,KAAOpC,CAAAA,CAAAA,CAAYqC,CAAmB,CACxC,CAAC,CAEKQ,CAAAA,EAAAA,CAAsB5C,CAGtB6C,CAAAA,CAAAA,CAAN,cAA2B3C,CAAkB,CAC3C,WAAA,CAAYG,EAAkC,CAC5C,KAAA,CAAMiC,CAAoBjC,CAAAA,CAAO,EACnC,CACF,EC3BMyC,IAAAA,CAAAA,CAAsB,uBAEtBC,EACJ,CAAA,6NAAA,CAGIC,CAAqBT,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC5G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOL,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAY+C,CAAAA,CAAmB,CACxC,CAAC,EAEKG,EAAuBjD,CAAAA,CAAAA,CAGvBkD,CAAN,CAAA,cAA2BhD,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAM2C,CAAAA,CAAAA,CAAoB3C,CAAO,EACnC,CACF,EChCA,IAAM8C,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6Bd,wBAAgBC,CAAAA,2BAAAA,CAAsBc,oCAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMH,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,eAAiB,CAAA,IAAA,CACjB,KAAOV,CAAAA,8BAAAA,CACP,UAAYa,CAAAA,gCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACA,CAAA,KAAA,CAAOpC,EAAYoD,CAA2B,CAChD,CAAC,CAAA,CAEKK,GAA8BxD,CAG9ByD,CAAAA,CAAAA,
CAAN,cAAmCvD,CAAkB,CACnD,WAAA,CAAYG,CAA0C,CAAA,CACpD,MAAMgD,CAA4BhD,CAAAA,CAAO,EAC3C,CACF,ECvBMqD,IAAAA,CAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,qQAGIC,CAAqBrB,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMiB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOjB,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CAAA,CACA,KAAOpC,CAAAA,CAAAA,CAAY2D,CAAmB,CACxC,CAAC,CAEKG,CAAAA,EAAAA,CAAuB7D,EAGvB8D,CAAN,CAAA,cAA2B5D,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAMuD,EAAoBvD,CAAO,EACnC,CACF,EC3BM0D,IAAAA,CAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,qQAGIC,CAAqB1B,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMsB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOtB,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CAAA,CACA,KAAOpC,CAAAA,CAAAA,CAAYgE,CAAmB,CACxC,CAAC,CAEKG,CAAAA,EAAAA,CAAuBlE,EAGvBmE,CAAN,CAAA,cAA2BjE,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM4D,EAAoB5D,CAAO,EACnC,CACF,MChCM+D,CAA8B,CAAA,8BAAA,CAE9BC,EACJ,CAAA,wSAAA,CAIIC,EAA6B/B,wBAAgBC,CAAAA,2BAAAA,CAAsBc,oCAA6B,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMc,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,CAAA,eAAA,CAAiB,KACjB,KAAO3B,CAAAA,8BAAAA,CACP,UAAYa,CAAAA,gCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKpB,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACA,CAAA,KAAA,CAAOpC,CAAYqE,CAAAA,CAA2B,CAChD,CAAC,CAAA,CAEKG,EAA8BvE,CAAAA,CAAAA,CAG9BwE,EAAN,cAAmCtE,CAAkB,CACnD,WAAA,CAAYG,CAA0C,CAAA,CACpD,KAAMiE,CAAAA,CAAAA,CAA4BjE,CAAO,EAC3C,CACF,ECvBA,IAAMoE,EAAqB,iBAErBC,CAAAA,EAAAA,CACJ,yKAGIC,CAAAA,CAAAA,CAAoBpC,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMgC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOhC,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,EACA,KAAOpC,CAAAA,CAAAA,CAAY0E,CAAkB,CACvC,CAAC,CAEKG,CAAAA,EAAAA,CAAqB5E,CAGrB6E,CAAAA,CAAAA,CAAN,cAA0B3E,CAAkB,CAC1C,WAAYG,CAAAA,CAAAA,CAAiC,CAC3C,KAAA,CAAMsE,CAAmBtE,CAAAA,CAAO,EAClC,CACF,EC3BMyE,IAAAA,CAAAA,CAAoB,iBAEpBC,EACJ,CAAA,yKAAA,CAGIC,CAAmBzC,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOrC,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAY+E,CAAAA,CAAiB,CACtC,CAAC,EAEKG,EAAoBjF,CAAAA,CAAAA,CAGpBkF,CAAN,CAAA,cAAyBhF,CAAkB,CACzC,WAAA,CAAYG,CAAgC,CAAA,CAC1C,KAAM2E,CAAAA,CAAAA,CAAkB3E,CAAO,EACjC,CACF,EC3BM8E,IAAAA,CAAAA,CAAyB,mBAEzBC,EAA6B,CAAA,iGAAA,CAE7BC,CAAwB9C,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC/G,KAAM0C,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAYoF,CAAAA,CAAsB,CAC3C,CAAC,EAEKG,EAAyBtF,CAAAA,CAAAA,CAGzBuF,CAAN,CAAA,cAA8BrF,CAAkB,CAC9C,WAAA,CAAYG,CAAqC,CAAA,CAC/C,KAAMgF,CAAAA,CAAAA,CAAuBhF,CAAO,EACtC,CACF,ECjCA,IAAMmF,EAAkB,CAAA,MAAA,CAClBhF,EAAN,KAAuI,CAAvI,WACE,EAAA,CAAA,IAAA,CAAS,QAAU,IACnB,CAAA,IAA
A,CAAS,IAAOgF,CAAAA,EAAAA,CAGhB,IAAiB,CAAA,kBAAA,CAOb,CACF,CAAQpD,CAAmB,EAAG,CAC5B,KAAcS,CAAAA,CAAAA,CACd,aAAqBD,EACrB,CAAA,WAAA,CAAoBN,CACtB,CAAA,CACA,CAAQ6C,CAAsB,EAAG,CAC/B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAiB,EAAG,CAC1B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBD,CAAAA,CACtB,CACA,CAAA,CAAQP,CAAkB,EAAG,CAC3B,KAAA,CAAcI,EACd,YAAqBD,CAAAA,EAAAA,CACrB,WAAoBD,CAAAA,CACtB,EACA,CAAQ7B,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQG,CAA2B,EAAG,CACpC,KAAA,CAAcM,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBH,CAAAA,CACtB,CACA,CAAA,CAAQe,CAA2B,EAAG,CACpC,KAAcI,CAAAA,CAAAA,CACd,YAAqBD,CAAAA,EAAAA,CACrB,WAAoBD,CAAAA,CACtB,EACA,CAAQP,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBD,CAAAA,CACtB,CACF,CAAA,CAEA,IAAiB,CAAA,uBAAA,CAOb,GAAC,CAEL,mBAA8B,CAC5B,OAAO,MAAO,CAAA,IAAA,CAAK,IAAK,CAAA,kBAAkB,CAC5C,CAEA,kBAAwD,CACtD,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,kBAAkB,CAAA,CAAE,MAC1C,CAAA,CAAC6B,EAAKC,CACJD,IAAAA,CAAAA,CAAIC,CAAG,CAAA,CAAI,IAAK,CAAA,kBAAA,CAAmBA,CAAG,CAAA,CAAE,YACjCD,CAET,CAAA,CAAA,EACF,CACF,CAEA,SAAUpF,CAAAA,CAAAA,CAAyB,CACjC,IAAMsF,EAAYtF,CAAQ,CAAA,SAAA,CAC1B,GAAI,EAAEsF,CAAa,IAAA,IAAA,CAAK,kBACtB,CAAA,CAAA,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAS,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAS,CAAA;AAAA,UAAA,EAC1C,KAAK,iBAAkB,EAAA,CAAE,KAAK,IAAI,CAAC,EAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,EAAQ,IAAK,CAAA,kBAAA,CAAmBF,CAAS,CAAE,CAAA,KAAA,CAC3CrF,EAAgB,IAAK,CAAA,kBAAA,CAAmBqF,CAAS,CAAA,CAAE,aAAa,KAAMtF,CAAAA,CAAO,EACnF,OAAO,IAAIwF,EAAMvF,CAAa,CAChC,CAEA,sBAAA,EAAmC,CACjC,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,uBAAuB,CACjD,CAEA,qBAAA,EAAkE,CAChE,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,uBAAuB,CAAE,CAAA,MAAA,CAC/C,CAACmF,CAAKC,CAAAA,CAAAA,IACJD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GAET,EACF,CACF,CAGA,eAAepF,CAA8B,CAAA,CAC3C,MAAM,IAAIuF,sBAAAA,CAAc,CACtB,IAAM,CAAA,4CAAA,CACN,MAAO,IAAI,KAAA,CAAM,4CAA4C,CAC/D,CAAC,CACH,CACF,EAvHMpF,EAGY,OAAU,CAAA,gCAAA","file":"index.js","sourcesContent":["{\n \"gemma2-9b-it\": {\n \"modelName\": \"gemma2-9b-it\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.2,\n \"outputPricePerMillion\": 0.2\n }\n }\n }\n ]\n },\n \"llama-3.1-8b-instant\": {\n \"modelName\": \"llama-3.1-8b-instant\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.05,\n \"outputPricePerMillion\": 0.08\n }\n }\n }\n ]\n },\n \"llama-3.2-11b-vision-preview\": {\n \"modelName\": \"llama-3.2-11b-vision-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.18,\n \"outputPricePerMillion\": 0.18\n }\n }\n }\n ]\n },\n \"llama-3.2-1b-preview\": {\n \"modelName\": \"llama-3.2-1b-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.04,\n \"outputPricePerMillion\": 0.04\n }\n }\n }\n ]\n },\n \"llama-3.2-3b-preview\": {\n \"modelName\": \"llama-3.2-3b-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.06,\n \"outputPricePerMillion\": 0.06\n }\n }\n }\n ]\n 
},\n \"llama-3.2-90b-vision-preview\": {\n \"modelName\": \"llama-3.2-90b-vision-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.7,\n \"outputPricePerMillion\": 0.8\n }\n }\n }\n ]\n },\n \"llama3-70b-8192\": {\n \"modelName\": \"llama3-70b-8192\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.59,\n \"outputPricePerMillion\": 0.79\n }\n }\n }\n ]\n },\n \"llama3-8b-8192\": {\n \"modelName\": \"llama3-8b-8192\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.05,\n \"outputPricePerMillion\": 0.08\n }\n }\n }\n ]\n },\n \"llama-guard-3-8b\": {\n \"modelName\": \"llama-guard-3-8b\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.2,\n \"outputPricePerMillion\": 0.2\n }\n }\n }\n ]\n }\n}\n","import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, InvalidMessagesError, ModelResponseError, ParamsType } from \"@adaline/provider\";\nimport { ChatModelPriceType, MessageType } from \"@adaline/types\";\n\nimport { Groq } from \"../../provider\";\nimport pricingData from \"../pricing.json\";\n\nconst BaseChatModelOptions = z.object({\n modelName: z.string().min(1),\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n readonly modelName: string;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n modelName: parsedOptions.modelName,\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.modelName = parsedOptions.modelName;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const hasSystemRole = messages.some((msg) => msg.role === \"system\");\n const hasImageModality = messages.some((msg) => msg.content.some((content: any) => content.modality === \"image\"));\n\n if (hasSystemRole && hasImageModality) {\n throw new InvalidMessagesError({\n info: `Invalid message content for model : '${this.modelName}'`,\n cause: new Error(\"Prompting with images is incompatible with system messages`)\"),\n });\n }\n\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality\n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = 
message.content.map((content) => content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n getModelPricing(): ChatModelPriceType {\n // Check if the modelName exists in pricingData before accessing it\n if (!(this.modelName in pricingData)) {\n throw new ModelResponseError({\n info: `Invalid model pricing for model : '${this.modelName}'`,\n cause: new Error(`No pricing configuration found for model \"${this.modelName}\"`),\n });\n }\n\n const entry = pricingData[this.modelName as keyof typeof pricingData];\n return entry as ChatModelPriceType;\n }\n}\n\nexport { BaseChatModelGroq, BaseChatModelOptions, type BaseChatModelOptionsType };\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n});\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. 
\\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { frequencyPenalty, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topP, responseFormat };\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n responseFormat,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import { ChatModelBaseConfigDef, ChatModelBaseConfigSchema } from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Gemma2_9b_ITLiteral = \"gemma2-9b-it\" as const;\n// https://huggingface.co/google/gemma-2-9b-it\nconst Gemma2_9b_ITDescription =\n \"Gemma is a family of lightweight, state-of-the-art open models from Google, \\\n built from the same research and technology used to create the Gemini models.\";\n\nconst Gemma2_9b_ITSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Gemma2_9b_ITLiteral,\n description: Gemma2_9b_ITDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Gemma2_9b_ITLiteral],\n});\n\nconst Gemma2_9b_ITOptions = BaseChatModelOptions;\ntype Gemma2_9b_ITOptionsType = z.infer<typeof Gemma2_9b_ITOptions>;\n\nclass Gemma2_9b_IT extends BaseChatModelGroq {\n constructor(options: Gemma2_9b_ITOptionsType) {\n super(Gemma2_9b_ITSchema, options);\n }\n}\n\nexport { Gemma2_9b_IT, Gemma2_9b_ITLiteral, Gemma2_9b_ITOptions, Gemma2_9b_ITSchema, type Gemma2_9b_ITOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from 
\"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription =\n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_1_8bLiteral],\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport { Llama_3_1_8b, Llama_3_1_8b_Options, Llama_3_1_8bLiteral, Llama_3_1_8bSchema, type Llama_3_1_8b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription =\n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. 
\\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_11b_VisionLiteral],\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n type Llama_3_2_11b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_1bLiteral],\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport { Llama_3_2_1b, Llama_3_2_1b_Options, Llama_3_2_1bLiteral, Llama_3_2_1bSchema, type Llama_3_2_1b_OptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_3bLiteral],\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport { Llama_3_2_3b, Llama_3_2_3b_Options, Llama_3_2_3bLiteral, Llama_3_2_3bSchema, type Llama_3_2_3b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_90b_VisionLiteral = \"llama-3.2-90b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-90B-Vision\nconst Llama_3_2_90b_VisionDescription =\n \"The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, \\\n complex image reasoning, detailed captioning, and answering intricate questions about images. 
\\\n These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.\";\n\nconst Llama_3_2_90b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_90b_VisionLiteral,\n description: Llama_3_2_90b_VisionDescription,\n maxInputTokens: 131072,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_90b_VisionLiteral],\n});\n\nconst Llama_3_2_90b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_90b_VisionOptionsType = z.infer<typeof Llama_3_2_90b_VisionOptions>;\n\nclass Llama_3_2_90b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_90b_VisionOptionsType) {\n super(Llama_3_2_90b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_90b_Vision,\n Llama_3_2_90b_VisionLiteral,\n Llama_3_2_90b_VisionOptions,\n Llama_3_2_90b_VisionSchema,\n type Llama_3_2_90b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Llama_3_70bLiteral],\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport { Llama_3_70b, Llama_3_70bLiteral, Llama_3_70bOptions, Llama_3_70bSchema, type Llama_3_70bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = 
ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Llama_3_8bLiteral],\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport { Llama_3_8b, Llama_3_8bLiteral, Llama_3_8bOptions, Llama_3_8bSchema, type Llama_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[LlamaGuard_3_8bLiteral],\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport { LlamaGuard_3_8b, LlamaGuard_3_8bLiteral, LlamaGuard_3_8bOptions, LlamaGuard_3_8bSchema, type LlamaGuard_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport { ChatModelSchemaType, ChatModelV1, EmbeddingModelSchemaType, EmbeddingModelV1, ProviderError, ProviderV1 } from \"@adaline/provider\";\n\nimport * as Models from \"../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<C extends Models.BaseChatModelOptionsType, E extends Record<string, any> = Record<string, any>> implements ProviderV1<C, E> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: Record<\n string,\n {\n model: { new (options: any): ChatModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: ChatModelSchemaType;\n }\n > = {\n [Models.Gemma2_9b_ITLiteral]: {\n model: Models.Gemma2_9b_IT,\n modelOptions: Models.Gemma2_9b_ITOptions,\n modelSchema: Models.Gemma2_9b_ITSchema,\n },\n [Models.LlamaGuard_3_8bLiteral]: {\n model: Models.LlamaGuard_3_8b,\n modelOptions: Models.LlamaGuard_3_8bOptions,\n modelSchema: Models.LlamaGuard_3_8bSchema,\n },\n [Models.Llama_3_8bLiteral]: {\n model: Models.Llama_3_8b,\n modelOptions: 
Models.Llama_3_8bOptions,\n modelSchema: Models.Llama_3_8bSchema,\n },\n [Models.Llama_3_70bLiteral]: {\n model: Models.Llama_3_70b,\n modelOptions: Models.Llama_3_70bOptions,\n modelSchema: Models.Llama_3_70bSchema,\n },\n [Models.Llama_3_1_8bLiteral]: {\n model: Models.Llama_3_1_8b,\n modelOptions: Models.Llama_3_1_8b_Options,\n modelSchema: Models.Llama_3_1_8bSchema,\n },\n [Models.Llama_3_2_11b_VisionLiteral]: {\n model: Models.Llama_3_2_11b_Vision,\n modelOptions: Models.Llama_3_2_11b_VisionOptions,\n modelSchema: Models.Llama_3_2_11b_VisionSchema,\n },\n [Models.Llama_3_2_90b_VisionLiteral]: {\n model: Models.Llama_3_2_90b_Vision,\n modelOptions: Models.Llama_3_2_90b_VisionOptions,\n modelSchema: Models.Llama_3_2_90b_VisionSchema,\n },\n [Models.Llama_3_2_3bLiteral]: {\n model: Models.Llama_3_2_3b,\n modelOptions: Models.Llama_3_2_3b_Options,\n modelSchema: Models.Llama_3_2_3bSchema,\n },\n [Models.Llama_3_2_1bLiteral]: {\n model: Models.Llama_3_2_1b,\n modelOptions: Models.Llama_3_2_1b_Options,\n modelSchema: Models.Llama_3_2_1bSchema,\n },\n };\n\n private readonly embeddingModelFactories: Record<\n string,\n {\n model: { new (options: any): EmbeddingModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: EmbeddingModelSchemaType;\n }\n > = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, ChatModelSchemaType>\n );\n }\n\n chatModel(options: C): ChatModelV1 {\n const modelName = options.modelName;\n if (!(modelName in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${modelName} not found`,\n cause: new Error(`Groq chat model: ${modelName} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[modelName].model;\n const parsedOptions = this.chatModelFactories[modelName].modelOptions.parse(options);\n return new model(parsedOptions);\n }\n\n embeddingModelLiterals(): string[] {\n return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, EmbeddingModelSchemaType>\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n embeddingModel(options: E): EmbeddingModelV1 {\n throw new ProviderError({\n info: \"Groq does not support embedding models yet\",\n cause: new Error(\"Groq does not support embedding models yet\"),\n });\n }\n}\n\nexport { Groq };\n"]}
|
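The index.js.map hunk above carries the substance of this release: a bundled pricing.json covering every Groq chat model, plus a new getModelPricing() method on BaseChatModelGroq that looks the current modelName up in that table and throws ModelResponseError when no entry exists. Below is a minimal consumer-side sketch, not an excerpt from the package: it assumes @adaline/groq re-exports the Groq provider defined in provider.groq.ts, and that the instance returned by chatModel() exposes getModelPricing() (the cast sidesteps the ChatModelV1 typing, which this diff does not show). The per-million rates referenced in the comments are the ones embedded in pricing.json above.

import { Groq } from "@adaline/groq";

const groq = new Groq();
const model = groq.chatModel({
  modelName: "llama-3.1-8b-instant",
  apiKey: process.env.GROQ_API_KEY ?? "",
});

// New in 1.0.0: returns this model's entry from the bundled pricing.json,
// e.g. { modelName, currency: "USD", tokenRanges: [...] }.
const price = (model as any).getModelPricing();

// Every Groq entry shipped in this version uses a single flat range
// (minTokens: 0, maxTokens: null), so tokenRanges[0] is enough here.
const { inputPricePerMillion, outputPricePerMillion } = price.tokenRanges[0].prices.base;

// Hypothetical token counts, purely for illustration.
const inputTokens = 12_000;
const outputTokens = 1_500;
const costUsd =
  (inputTokens / 1_000_000) * inputPricePerMillion +  // 12_000 * 0.05 / 1e6 = 0.0006
  (outputTokens / 1_000_000) * outputPricePerMillion; // 1_500 * 0.08 / 1e6 = 0.00012
console.log(`~$${costUsd.toFixed(6)} for ${inputTokens} in / ${outputTokens} out tokens`);

The tokenRanges array leaves room for tiered pricing, but none of the entries shipped in this version define more than one range.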
package/dist/index.mjs
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
|
-
import { RangeConfigItem, CHAT_CONFIG, MultiStringConfigItem, SelectStringConfigItem, ChatModelSchema, InvalidMessagesError, ProviderError } from '@adaline/provider';
|
|
1
|
+
import { RangeConfigItem, CHAT_CONFIG, MultiStringConfigItem, SelectStringConfigItem, ChatModelSchema, InvalidMessagesError, ModelResponseError, ProviderError } from '@adaline/provider';
|
|
2
2
|
import { z as z$1 } from 'zod';
|
|
3
3
|
import { OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatModelRolesMap, OpenAIChatModelTextToolModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelModalities, BaseChatModel } from '@adaline/openai';
|
|
4
4
|
|
|
5
|
-
var
|
|
6
|
-
`)):
|
|
7
|
-
`));}),
|
|
8
|
-
${this.chatModelLiterals().join(", ")}`)});let
|
|
5
|
+
var a={"gemma2-9b-it":{modelName:"gemma2-9b-it",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.2,outputPricePerMillion:.2}}}]},"llama-3.1-8b-instant":{modelName:"llama-3.1-8b-instant",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.05,outputPricePerMillion:.08}}}]},"llama-3.2-11b-vision-preview":{modelName:"llama-3.2-11b-vision-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.18,outputPricePerMillion:.18}}}]},"llama-3.2-1b-preview":{modelName:"llama-3.2-1b-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.04,outputPricePerMillion:.04}}}]},"llama-3.2-3b-preview":{modelName:"llama-3.2-3b-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.06,outputPricePerMillion:.06}}}]},"llama-3.2-90b-vision-preview":{modelName:"llama-3.2-90b-vision-preview",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.7,outputPricePerMillion:.8}}}]},"llama3-70b-8192":{modelName:"llama3-70b-8192",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.59,outputPricePerMillion:.79}}}]},"llama3-8b-8192":{modelName:"llama3-8b-8192",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.05,outputPricePerMillion:.08}}}]},"llama-guard-3-8b":{modelName:"llama-guard-3-8b",currency:"USD",tokenRanges:[{minTokens:0,maxTokens:null,prices:{base:{inputPricePerMillion:.2,outputPricePerMillion:.2}}}]}};var n=z$1.object({modelName:z$1.string().min(1),apiKey:z$1.string().min(1)}),t=class extends BaseChatModel{constructor(i,c){let r=n.parse(c),p=h.baseUrl;super(i,{modelName:r.modelName,apiKey:r.apiKey,baseUrl:p,completeChatUrl:`${p}/chat/completions`,streamChatUrl:`${p}/chat/completions`});this.version="v1";this.modelSchema=i,this.modelName=r.modelName,this.groqApiKey=r.apiKey;}transformMessages(i){let c=i.some(l=>l.role==="system"),r=i.some(l=>l.content.some(_=>_.modality==="image"));if(c&&r)throw new InvalidMessagesError({info:`Invalid message content for model : '${this.modelName}'`,cause:new Error("Prompting with images is incompatible with system messages`)")});let p=super.transformMessages(i);return p.messages.forEach(l=>{l.role==="system"?typeof l.content!="string"&&(l.content=l.content.map(_=>_.text).join(`
|
|
6
|
+
`)):l.role==="assistant"&&l.content&&typeof l.content!="string"&&(l.content=l.content.map(_=>_.text).join(`
|
|
7
|
+
`));}),p}getModelPricing(){if(!(this.modelName in a))throw new ModelResponseError({info:`Invalid model pricing for model : '${this.modelName}'`,cause:new Error(`No pricing configuration found for model "${this.modelName}"`)});return a[this.modelName]}};var v=RangeConfigItem({param:"temperature",title:CHAT_CONFIG.TEMPERATURE.title,description:CHAT_CONFIG.TEMPERATURE.description,min:0,max:2,step:.01,default:1}),N=s=>RangeConfigItem({param:"max_tokens",title:CHAT_CONFIG.MAX_TOKENS.title,description:CHAT_CONFIG.MAX_TOKENS.description,min:0,max:s,step:1,default:0}),q=MultiStringConfigItem({param:"stop",title:CHAT_CONFIG.STOP(4).title,description:CHAT_CONFIG.STOP(4).description,max:4}),w=RangeConfigItem({param:"top_p",title:CHAT_CONFIG.TOP_P.title,description:CHAT_CONFIG.TOP_P.description,min:0,max:1,step:.01,default:1}),z=RangeConfigItem({param:"frequency_penalty",title:CHAT_CONFIG.FREQUENCY_PENALTY.title,description:CHAT_CONFIG.FREQUENCY_PENALTY.description,min:-2,max:2,step:.01,default:0}),B=RangeConfigItem({param:"presence_penalty",title:CHAT_CONFIG.PRESENCE_PENALTY.title,description:CHAT_CONFIG.PRESENCE_PENALTY.description,min:-2,max:2,step:.01,default:0}),D=RangeConfigItem({param:"seed",title:CHAT_CONFIG.SEED.title,description:CHAT_CONFIG.SEED.description,min:0,max:1e6,step:1,default:0}),V=SelectStringConfigItem({param:"response_format",title:CHAT_CONFIG.RESPONSE_FORMAT.title,description:CHAT_CONFIG.RESPONSE_FORMAT.description,default:"text",choices:["text","json_object"]}),F=SelectStringConfigItem({param:"tool_choice",title:"Tool choice",description:"Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",default:"auto",choices:["auto","required","none"]});var W=s=>z$1.object({temperature:v.schema,maxTokens:N(s).schema,stop:q.schema,topP:w.schema,frequencyPenalty:z.schema,presencePenalty:B.schema,seed:D.schema.transform(e=>e===0?void 0:e),responseFormat:V.schema,toolChoice:F.schema}),ee=s=>({temperature:v.def,maxTokens:N(s).def,stop:q.def,topP:w.def,frequencyPenalty:z.def,presencePenalty:B.def,seed:D.def,responseFormat:V.def,toolChoice:F.def});var o={base:s=>({def:ee(s),schema:W(s)})};var u="gemma2-9b-it",ge="Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models.",U=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:u,description:ge,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[u]}),oe=n,M=class extends t{constructor(e){super(U,e);}};var f="llama-3.1-8b-instant",Ie="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",j=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:f,description:Ie,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[f]}),ae=n,b=class extends t{constructor(e){super(j,e);}};var y="llama-3.2-11b-vision-preview",ve="The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general 
questions about an image. The models outperform many of the available open source and closed multimodal models on common industry benchmarks.",K=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelModalitiesEnum).parse({name:y,description:ve,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[y]}),te=n,T=class extends t{constructor(e){super(K,e);}};var O="llama-3.2-1b-preview",De="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",$=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:O,description:De,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[O]}),se=n,g=class extends t{constructor(e){super($,e);}};var C="llama-3.2-3b-preview",$e="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",Y=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:C,description:$e,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[C]}),ie=n,L=class extends t{constructor(e){super(Y,e);}};var x="llama-3.2-90b-vision-preview",Je="The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, complex image reasoning, detailed captioning, and answering intricate questions about images. 
These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.",Q=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelModalitiesEnum).parse({name:x,description:Je,maxInputTokens:131072,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema},price:a[x]}),ne=n,S=class extends t{constructor(e){super(Q,e);}};var P="llama3-70b-8192",so="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",X=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:P,description:so,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[P]}),me=n,I=class extends t{constructor(e){super(X,e);}};var k="llama3-8b-8192",po="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",Z=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:k,description:po,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[k]}),le=n,E=class extends t{constructor(e){super(Z,e);}};var R="llama-guard-3-8b",fo="Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",H=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:R,description:fo,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema},price:a[R]}),re=n,A=class extends t{constructor(e){super(H,e);}};var bo="groq",h=class{constructor(){this.version="v1";this.name=bo;this.chatModelFactories={[u]:{model:M,modelOptions:oe,modelSchema:U},[R]:{model:A,modelOptions:re,modelSchema:H},[k]:{model:E,modelOptions:le,modelSchema:Z},[P]:{model:I,modelOptions:me,modelSchema:X},[f]:{model:b,modelOptions:ae,modelSchema:j},[y]:{model:T,modelOptions:te,modelSchema:K},[x]:{model:S,modelOptions:ne,modelSchema:Q},[C]:{model:L,modelOptions:ie,modelSchema:Y},[O]:{model:g,modelOptions:se,modelSchema:$}};this.embeddingModelFactories={};}chatModelLiterals(){return Object.keys(this.chatModelFactories)}chatModelSchemas(){return Object.keys(this.chatModelFactories).reduce((e,i)=>(e[i]=this.chatModelFactories[i].modelSchema,e),{})}chatModel(e){let i=e.modelName;if(!(i in this.chatModelFactories))throw new ProviderError({info:`Groq chat model: ${i} not found`,cause:new Error(`Groq chat model: ${i} not found, available chat models:
|
|
8
|
+
${this.chatModelLiterals().join(", ")}`)});let c=this.chatModelFactories[i].model,r=this.chatModelFactories[i].modelOptions.parse(e);return new c(r)}embeddingModelLiterals(){return Object.keys(this.embeddingModelFactories)}embeddingModelSchemas(){return Object.keys(this.embeddingModelFactories).reduce((e,i)=>(e[i]=this.embeddingModelFactories[i].modelSchema,e),{})}embeddingModel(e){throw new ProviderError({info:"Groq does not support embedding models yet",cause:new Error("Groq does not support embedding models yet")})}};h.baseUrl="https://api.groq.com/openai/v1";
|
|
9
9
|
|
|
10
|
-
export {
|
|
10
|
+
export { t as BaseChatModelGroq, n as BaseChatModelOptions, M as Gemma2_9b_IT, u as Gemma2_9b_ITLiteral, oe as Gemma2_9b_ITOptions, U as Gemma2_9b_ITSchema, h as Groq, A as LlamaGuard_3_8b, R as LlamaGuard_3_8bLiteral, re as LlamaGuard_3_8bOptions, H as LlamaGuard_3_8bSchema, b as Llama_3_1_8b, f as Llama_3_1_8bLiteral, j as Llama_3_1_8bSchema, ae as Llama_3_1_8b_Options, T as Llama_3_2_11b_Vision, y as Llama_3_2_11b_VisionLiteral, te as Llama_3_2_11b_VisionOptions, K as Llama_3_2_11b_VisionSchema, g as Llama_3_2_1b, O as Llama_3_2_1bLiteral, $ as Llama_3_2_1bSchema, se as Llama_3_2_1b_Options, L as Llama_3_2_3b, C as Llama_3_2_3bLiteral, Y as Llama_3_2_3bSchema, ie as Llama_3_2_3b_Options, S as Llama_3_2_90b_Vision, x as Llama_3_2_90b_VisionLiteral, ne as Llama_3_2_90b_VisionOptions, Q as Llama_3_2_90b_VisionSchema, I as Llama_3_70b, P as Llama_3_70bLiteral, me as Llama_3_70bOptions, X as Llama_3_70bSchema, E as Llama_3_8b, k as Llama_3_8bLiteral, le as Llama_3_8bOptions, Z as Llama_3_8bSchema };
|
|
11
11
|
//# sourceMappingURL=index.mjs.map
|
|
12
12
|
//# sourceMappingURL=index.mjs.map
|
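Note: the functional change in this file is the bundled pricing table (the minified variable a) and the new getModelPricing() method on BaseChatModelGroq, which returns the ChatModelPriceType entry for the current model (USD input/output prices per million tokens over token ranges) and throws a ModelResponseError when no entry exists. A hedged sketch of consuming that shape in TypeScript (the cost helper and token counts are illustrative, not part of the package):

import { Gemma2_9b_IT } from "@adaline/groq";

const model = new Gemma2_9b_IT({
  modelName: "gemma2-9b-it",
  apiKey: "<GROQ_API_KEY>", // placeholder
});

// New in 1.0.0: per-model pricing metadata bundled with the build.
const pricing = model.getModelPricing();

// Illustrative estimate: pick the token range covering the prompt size and
// apply its per-million-token USD rates.
function estimateCostUSD(inputTokens: number, outputTokens: number): number {
  const range =
    pricing.tokenRanges.find(
      (r) => inputTokens >= r.minTokens && (r.maxTokens == null || inputTokens <= r.maxTokens)
    ) ?? pricing.tokenRanges[0];
  const { inputPricePerMillion, outputPricePerMillion } = range.prices.base;
  return (inputTokens / 1e6) * inputPricePerMillion + (outputTokens / 1e6) * outputPricePerMillion;
}

// For gemma2-9b-it this evaluates to (10000/1e6) * 0.20 + (2000/1e6) * 0.20 = 0.0024 USD
// for a 10k-token prompt and a 2k-token completion, per the table above.
console.log(estimateCostUSD(10_000, 2_000));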
package/dist/index.mjs.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/gemma2-9b-it.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-90b-vision.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","hasSystemRole","msg","hasImageModality","content","InvalidMessagesError","transformedMessages","message","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Gemma2_9b_ITLiteral","Gemma2_9b_ITDescription","Gemma2_9b_ITSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Gemma2_9b_ITOptions","Gemma2_9b_IT","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_90b_VisionLiteral","Llama_3_2_90b_VisionDescription","Llama_3_2_90b_VisionSchema","Llama_3_2_90b_VisionOptions","Llama_3_2_90b_Vision","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","ProviderLiteral","acc","key","modelName","ProviderError","model"],"mappings":";;;;IAQMA,CAAuBC,CAAAA,GAAAA,CAAE,MAAO,CAAA,CACpC,UAAWA,GAAE,CAAA,MAAA,EAAS,CAAA,GAAA,CAAI,CAAC,CAC3B,CAAA,MAAA,CAAQA,GAAE,CAAA,MAAA,GAAS,GAAI,CAAA,CAAC,CAC1B,CAAC,CAAA,CAGKC,EAAN,cAAgCC,aAAc,CAO5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,EAAqB,KAAMK,CAAAA,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,EAAK,OACrB,CAAA,KAAA,CAAMJ,CAAa,CAAA,CACjB,UAAWE,CAAc,CAAA,SAAA,CACzB,MAAQA,CAAAA,CAAAA,CAAc,OACtB,OAASC,CAAAA,CAAAA,CACT,eAAiB,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAA,aAAA,CAAe,CAAGA,EAAAA,CAAO,mBAC3B,CAAC,CAAA,CAfH,KAAS,OAAU,CAAA,IAAA,CAgBjB,KAAK,WAAcH,CAAAA,CAAAA,CACnB,IAAK,CAAA,SAAA,CAAYE,EAAc,SAC/B,CAAA,IAAA,CAAK,UAAaA,CAAAA,CAAAA,CAAc,OAClC,CAEA,iBAAA,CAAkBG,CAAqC,CAAA,CACrD,IAAMC,CAAgBD,CAAAA,CAAAA,CAAS,IAAME,CAAAA,CAAAA,EAAQA,EAAI,IAAS,GAAA,QAAQ,CAC5DC,CAAAA,CAAAA,CAAmBH,EAAS,IAAME,CAAAA,CAAAA,EAAQA,CAAI,CAAA,OAAA,CAAQ,KAAME,CAAiBA,EAAAA,CAAAA,CAAQ,QAAa,GAAA,OAAO,CAAC,CAEhH,CAAA,GAAIH,GAAiBE,CACnB,CAAA,MAAM,IAAIE,oBAAqB,CAAA,CAC7B,IAAM,CAAA,CAAA,qCAAA
,EAAwC,KAAK,SAAS,CAAA,CAAA,CAAA,CAC5D,KAAO,CAAA,IAAI,MAAM,8DAA8D,CACjF,CAAC,CAAA,CAGH,IAAMC,CAAsB,CAAA,KAAA,CAAM,kBAAkBN,CAAQ,CAAA,CAI5D,OAAAM,CAAoB,CAAA,QAAA,CAAS,OAASC,CAAAA,CAAAA,EAAY,CAC5CA,CAAQ,CAAA,IAAA,GAAS,QACf,CAAA,OAAOA,EAAQ,OAAY,EAAA,QAAA,GAC7BA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnEG,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEME,CAAAA,CACT,CACF,EC/DA,IAAME,CAAcC,CAAAA,eAAAA,CAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,WAAY,CAAA,WAAA,CAAY,KAC/B,CAAA,WAAA,CAAaA,WAAY,CAAA,WAAA,CAAY,YACrC,GAAK,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKC,CAAAA,CAAAA,CAAaC,CACjBH,EAAAA,eAAAA,CAAgB,CACd,KAAA,CAAO,YACP,CAAA,KAAA,CAAOC,YAAY,UAAW,CAAA,KAAA,CAC9B,WAAaA,CAAAA,WAAAA,CAAY,UAAW,CAAA,WAAA,CACpC,GAAK,CAAA,CAAA,CACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEGC,EAAOC,qBAAsB,CAAA,CACjC,KAAO,CAAA,MAAA,CACP,KAAOJ,CAAAA,WAAAA,CAAY,IAAK,CAAA,CAAC,EAAE,KAC3B,CAAA,WAAA,CAAaA,WAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,CAAA,CAEKK,CAAON,CAAAA,eAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,CAAA,KAAA,CAAOC,WAAY,CAAA,KAAA,CAAM,KACzB,CAAA,WAAA,CAAaA,WAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKM,EAAmBP,eAAgB,CAAA,CACvC,KAAO,CAAA,mBAAA,CACP,KAAOC,CAAAA,WAAAA,CAAY,iBAAkB,CAAA,KAAA,CACrC,YAAaA,WAAY,CAAA,iBAAA,CAAkB,WAC3C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,eAAgB,CAAA,CACtC,MAAO,kBACP,CAAA,KAAA,CAAOC,WAAY,CAAA,gBAAA,CAAiB,KACpC,CAAA,WAAA,CAAaA,WAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKQ,CAAAA,CAAAA,CAAOT,eAAgB,CAAA,CAC3B,KAAO,CAAA,MAAA,CACP,KAAOC,CAAAA,WAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,WAAaA,CAAAA,WAAAA,CAAY,IAAK,CAAA,WAAA,CAC9B,GAAK,CAAA,CAAA,CACL,IAAK,GACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEKS,CAAAA,CAAAA,CAAiBC,uBAAuB,CAC5C,KAAA,CAAO,iBACP,CAAA,KAAA,CAAOV,WAAY,CAAA,eAAA,CAAgB,KACnC,CAAA,WAAA,CAAaA,YAAY,eAAgB,CAAA,WAAA,CACzC,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,MAAQ,CAAA,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,sBAAuB,CAAA,CACxC,KAAO,CAAA,aAAA,CACP,MAAO,aACP,CAAA,WAAA,CACE,+LAGF,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,WAAY,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,CAAAA,CAA6BV,CACjCpB,EAAAA,GAAAA,CAAE,OAAO,CACP,WAAA,CAAagB,CAAY,CAAA,MAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,OACtC,IAAMC,CAAAA,CAAAA,CAAK,MACX,CAAA,IAAA,CAAME,CAAK,CAAA,MAAA,CACX,gBAAkBC,CAAAA,CAAAA,CAAiB,OACnC,eAAiBC,CAAAA,CAAAA,CAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,SAAWK,CAAAA,CAAAA,EAAWA,IAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CAAA,CACxE,cAAgBJ,CAAAA,CAAAA,CAAe,MAC/B,CAAA,UAAA,CAAYE,EAAW,MACzB,CAAC,CAEGG,CAAAA,CAAAA,CAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,GACzB,CAAA,SAAA,CAAWG,CAAUC,CAAAA,CAAe,CAAE,CAAA,GAAA,CACtC,IAAMC,CAAAA,CAAAA,CAAK,IACX,IAAME,CAAAA,CAAAA,CAAK,GACX,CAAA,gBAAA,CAAkBC,CAAiB,CAAA,GAAA,CACnC,eAAiBC,CAAAA,CAAAA,CAAgB,IACjC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,cAAA,CAAgBC,CAAe,CAAA,GAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,GACzB,CCpCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,IAAOb,CAAAA,CAAAA,GAA6B,CAClC,GAAA,CAAKY,CAAuBZ,CAAAA,CAAe,CAC3C,CAAA,MAAA,CAAQU,CAA0BV,CAAAA,CAAe,CACnD,CAAA,CACF,ECMMc,IAAAA,CAAAA,CAAsB,cAEtBC,CAAAA,EAAAA,CACJ,6JAGIC,CAAAA,CAAAA,CAAqBC,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5
G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,eAAiB,CAAA,IAAA,CACjB,KAAOK,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAAA,CAEKS,EAAsB3C,CAAAA,CAAAA,CAGtB4C,EAAN,cAA2B1C,CAAkB,CAC3C,WAAA,CAAYG,CAAkC,CAAA,CAC5C,KAAMgC,CAAAA,CAAAA,CAAoBhC,CAAO,EACnC,CACF,MC1BMwC,CAAsB,CAAA,sBAAA,CAEtBC,EACJ,CAAA,6NAAA,CAGIC,CAAqBT,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOL,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKc,CAAAA,EAAAA,CAAuBhD,CAGvBiD,CAAAA,CAAAA,CAAN,cAA2B/C,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM0C,CAAoB1C,CAAAA,CAAO,EACnC,CACF,MC/BM6C,CAA8B,CAAA,8BAAA,CAE9BC,EACJ,CAAA,0RAAA,CAIIC,CAA6Bd,CAAAA,eAAAA,CAAgBC,oBAAsBc,CAAAA,6BAA6B,EAAE,KAAM,CAAA,CAC5G,IAAMH,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOV,uBACP,CAAA,UAAA,CAAYa,yBACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKqB,CAAAA,EAAAA,CAA8BvD,CAG9BwD,CAAAA,CAAAA,CAAN,cAAmCtD,CAAkB,CACnD,WAAYG,CAAAA,CAAAA,CAA0C,CACpD,KAAA,CAAM+C,CAA4B/C,CAAAA,CAAO,EAC3C,CACF,ECtBA,IAAMoD,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqBrB,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMiB,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOjB,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEK0B,EAAuB5D,CAAAA,CAAAA,CAGvB6D,CAAN,CAAA,cAA2B3D,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,KAAMsD,CAAAA,CAAAA,CAAoBtD,CAAO,EACnC,CACF,EC1BA,IAAMyD,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqB1B,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMsB,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOtB,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEK+B,EAAuBjE,CAAAA,CAAAA,CAGvBkE,CAAN,CAAA,cAA2BhE,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,KAAM2D,CAAAA,CAAAA,CAAoB3D,CAAO,EACnC,CACF,EC/BM8D,IAAAA,CAAAA,CAA8B,+BAE9BC,EACJ,CAAA,wSAAA,CAIIC,CAA6B/B,CAAAA,eAAAA,CAAgBC,oBAAsBc,CAAAA,6BAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAMc,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,MAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAO3B,uBACP,CAAA,UAAA,CAAYa,yBACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKoC,CAAAA,EAAAA,CAA8BtE,CAG9BuE,CAAAA,CAAAA,CAAN,cAAmCrE,CAAkB,CACnD,WAAYG,CAAAA,CAAAA,CAA0C,CACpD,KAAA,CAAMgE,CAA4BhE,CAAAA,CAAO,EAC3C,CACF,ECtBMmE,IAAAA,CAAAA,CAAqB,kBAErBC,EACJ,CAAA,yKAAA,CAGIC,CAAoBpC,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMgC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOhC,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAAqB3E,CAGrB4E,CAAAA,CAAAA,CAAN,cAA0B1E,CAAkB,CAC1C,WAAYG,CAAAA,CA
AAA,CAAiC,CAC3C,KAAA,CAAMqE,CAAmBrE,CAAAA,CAAO,EAClC,CACF,EC1BA,IAAMwE,CAAoB,CAAA,gBAAA,CAEpBC,GACJ,yKAGIC,CAAAA,CAAAA,CAAmBzC,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOrC,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEK8C,EAAoBhF,CAAAA,CAAAA,CAGpBiF,CAAN,CAAA,cAAyB/E,CAAkB,CACzC,YAAYG,CAAgC,CAAA,CAC1C,KAAM0E,CAAAA,CAAAA,CAAkB1E,CAAO,EACjC,CACF,EC1BA,IAAM6E,CAAyB,CAAA,kBAAA,CAEzBC,GAA6B,iGAE7BC,CAAAA,CAAAA,CAAwB9C,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC/G,KAAM0C,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKmD,EAAyBrF,CAAAA,CAAAA,CAGzBsF,CAAN,CAAA,cAA8BpF,CAAkB,CAC9C,YAAYG,CAAqC,CAAA,CAC/C,KAAM+E,CAAAA,CAAAA,CAAuB/E,CAAO,EACtC,CACF,MC/BMkF,EAAkB,CAAA,MAAA,CAClB/E,CAAN,CAAA,KAAuI,CAAvI,WAAA,EAAA,CACE,IAAS,CAAA,OAAA,CAAU,KACnB,IAAS,CAAA,IAAA,CAAO+E,EAGhB,CAAA,IAAA,CAAiB,kBAOb,CAAA,CACF,CAAQpD,CAAmB,EAAG,CAC5B,KAAA,CAAcS,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBN,CACtB,CAAA,CACA,CAAQ6C,CAAsB,EAAG,CAC/B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAiB,EAAG,CAC1B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAkB,EAAG,CAC3B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQ7B,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQG,CAA2B,EAAG,CACpC,KAAA,CAAcM,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBH,CACtB,CAAA,CACA,CAAQe,CAA2B,EAAG,CACpC,KAAcI,CAAAA,CAAAA,CACd,aAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQP,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACF,CAEA,CAAA,IAAA,CAAiB,uBAOb,CAAA,GAEJ,CAAA,iBAAA,EAA8B,CAC5B,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAC5C,CAEA,gBAAA,EAAwD,CACtD,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAA,CAAE,MAC1C,CAAA,CAAC6B,CAAKC,CAAAA,CAAAA,IACJD,EAAIC,CAAG,CAAA,CAAI,IAAK,CAAA,kBAAA,CAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CAET,EACF,CACF,CAEA,SAAA,CAAUnF,CAAyB,CAAA,CACjC,IAAMqF,CAAAA,CAAYrF,EAAQ,SAC1B,CAAA,GAAI,EAAEqF,CAAAA,IAAa,IAAK,CAAA,kBAAA,CAAA,CACtB,MAAM,IAAIC,cAAc,CACtB,IAAA,CAAM,CAAoBD,iBAAAA,EAAAA,CAAS,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAS,CAAA;AAAA,UAAA,EAC1C,KAAK,iBAAkB,EAAA,CAAE,KAAK,IAAI,CAAC,EAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,EAAQ,IAAK,CAAA,kBAAA,CAAmBF,CAAS,CAAE,CAAA,KAAA,CAC3CpF,EAAgB,IAAK,CAAA,kBAAA,CAAmBoF,CAAS,CAAA,CAAE,aAAa,KAAMrF,CAAAA,CAAO,EACnF,OAAO,IAAIuF,EAAMtF,CAAa,CAChC,CAEA,sBAAA,EAAmC,CACjC,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,uBAAuB,CACjD,CAEA,qBAAA,EAAkE,CAChE,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,uBAAuB,CAAE,CAAA,MAAA,CAC/C,CAACkF,CAAKC,CAAAA,CAAAA,IACJD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GAET,EACF,CACF,CAGA,eAAenF,CAA8B,CAAA,CAC3C,MAAM,IAAIsF,aAAAA,CAAc,CACtB,IAAM,CAAA,4CAAA,CACN,MAAO,IAAI,KAAA,CAAM,4CAA4C,CAC/D,CAAC,CACH,CACF,EAvHMnF,EAGY,OAAU,CAAA,gCAAA","file":"index.mjs","sourcesContent":["import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, InvalidMessagesError, ParamsType } from 
\"@adaline/provider\";\nimport { MessageType } from \"@adaline/types\";\n\nimport { Groq } from \"../../provider\";\n\nconst BaseChatModelOptions = z.object({\n modelName: z.string().min(1),\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n readonly modelName: string;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n modelName: parsedOptions.modelName,\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.modelName = parsedOptions.modelName;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const hasSystemRole = messages.some((msg) => msg.role === \"system\");\n const hasImageModality = messages.some((msg) => msg.content.some((content: any) => content.modality === \"image\"));\n\n if (hasSystemRole && hasImageModality) {\n throw new InvalidMessagesError({\n info: `Invalid message content for model : '${this.modelName}'`,\n cause: new Error(\"Prompting with images is incompatible with system messages`)\"),\n });\n }\n\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality\n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n}\n\nexport { BaseChatModelGroq, BaseChatModelOptions, type BaseChatModelOptionsType };\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n});\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: 
\"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { frequencyPenalty, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topP, responseFormat };\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n responseFormat,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import { ChatModelBaseConfigDef, ChatModelBaseConfigSchema } from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Gemma2_9b_ITLiteral = \"gemma2-9b-it\" as const;\n// https://huggingface.co/google/gemma-2-9b-it\nconst Gemma2_9b_ITDescription =\n \"Gemma is a family of lightweight, state-of-the-art open models from Google, \\\n built from the same research and technology used to create the Gemini models.\";\n\nconst Gemma2_9b_ITSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Gemma2_9b_ITLiteral,\n description: Gemma2_9b_ITDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n 
config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Gemma2_9b_ITOptions = BaseChatModelOptions;\ntype Gemma2_9b_ITOptionsType = z.infer<typeof Gemma2_9b_ITOptions>;\n\nclass Gemma2_9b_IT extends BaseChatModelGroq {\n constructor(options: Gemma2_9b_ITOptionsType) {\n super(Gemma2_9b_ITSchema, options);\n }\n}\n\nexport { Gemma2_9b_IT, Gemma2_9b_ITOptions, Gemma2_9b_ITSchema, Gemma2_9b_ITLiteral, type Gemma2_9b_ITOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription =\n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport { Llama_3_1_8b, Llama_3_1_8b_Options, Llama_3_1_8bSchema, Llama_3_1_8bLiteral, type Llama_3_1_8b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription =\n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. 
\\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport { Llama_3_2_1b, Llama_3_2_1b_Options, Llama_3_2_1bSchema, Llama_3_2_1bLiteral, type Llama_3_2_1b_OptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport { Llama_3_2_3b, Llama_3_2_3b_Options, Llama_3_2_3bSchema, Llama_3_2_3bLiteral, type Llama_3_2_3b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_90b_VisionLiteral = \"llama-3.2-90b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-90B-Vision\nconst Llama_3_2_90b_VisionDescription =\n \"The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, \\\n complex image reasoning, detailed captioning, and answering intricate questions about images. \\\n These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.\";\n\nconst Llama_3_2_90b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_90b_VisionLiteral,\n description: Llama_3_2_90b_VisionDescription,\n maxInputTokens: 131072,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_90b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_90b_VisionOptionsType = z.infer<typeof Llama_3_2_90b_VisionOptions>;\n\nclass Llama_3_2_90b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_90b_VisionOptionsType) {\n super(Llama_3_2_90b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_90b_Vision,\n Llama_3_2_90b_VisionLiteral,\n Llama_3_2_90b_VisionOptions,\n Llama_3_2_90b_VisionSchema,\n type Llama_3_2_90b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, 
OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport { Llama_3_70b, Llama_3_70bOptions, Llama_3_70bSchema, Llama_3_70bLiteral, type Llama_3_70bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport { Llama_3_8b, Llama_3_8bOptions, Llama_3_8bSchema, Llama_3_8bLiteral, type Llama_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n 
constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport { LlamaGuard_3_8b, LlamaGuard_3_8bOptions, LlamaGuard_3_8bSchema, LlamaGuard_3_8bLiteral, type LlamaGuard_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport { ChatModelSchemaType, ChatModelV1, EmbeddingModelSchemaType, EmbeddingModelV1, ProviderError, ProviderV1 } from \"@adaline/provider\";\n\nimport * as Models from \"../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<C extends Models.BaseChatModelOptionsType, E extends Record<string, any> = Record<string, any>> implements ProviderV1<C, E> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: Record<\n string,\n {\n model: { new (options: any): ChatModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: ChatModelSchemaType;\n }\n > = {\n [Models.Gemma2_9b_ITLiteral]: {\n model: Models.Gemma2_9b_IT,\n modelOptions: Models.Gemma2_9b_ITOptions,\n modelSchema: Models.Gemma2_9b_ITSchema,\n },\n [Models.LlamaGuard_3_8bLiteral]: {\n model: Models.LlamaGuard_3_8b,\n modelOptions: Models.LlamaGuard_3_8bOptions,\n modelSchema: Models.LlamaGuard_3_8bSchema,\n },\n [Models.Llama_3_8bLiteral]: {\n model: Models.Llama_3_8b,\n modelOptions: Models.Llama_3_8bOptions,\n modelSchema: Models.Llama_3_8bSchema,\n },\n [Models.Llama_3_70bLiteral]: {\n model: Models.Llama_3_70b,\n modelOptions: Models.Llama_3_70bOptions,\n modelSchema: Models.Llama_3_70bSchema,\n },\n [Models.Llama_3_1_8bLiteral]: {\n model: Models.Llama_3_1_8b,\n modelOptions: Models.Llama_3_1_8b_Options,\n modelSchema: Models.Llama_3_1_8bSchema,\n },\n [Models.Llama_3_2_11b_VisionLiteral]: {\n model: Models.Llama_3_2_11b_Vision,\n modelOptions: Models.Llama_3_2_11b_VisionOptions,\n modelSchema: Models.Llama_3_2_11b_VisionSchema,\n },\n [Models.Llama_3_2_90b_VisionLiteral]: {\n model: Models.Llama_3_2_90b_Vision,\n modelOptions: Models.Llama_3_2_90b_VisionOptions,\n modelSchema: Models.Llama_3_2_90b_VisionSchema,\n },\n [Models.Llama_3_2_3bLiteral]: {\n model: Models.Llama_3_2_3b,\n modelOptions: Models.Llama_3_2_3b_Options,\n modelSchema: Models.Llama_3_2_3bSchema,\n },\n [Models.Llama_3_2_1bLiteral]: {\n model: Models.Llama_3_2_1b,\n modelOptions: Models.Llama_3_2_1b_Options,\n modelSchema: Models.Llama_3_2_1bSchema,\n },\n };\n\n private readonly embeddingModelFactories: Record<\n string,\n {\n model: { new (options: any): EmbeddingModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: EmbeddingModelSchemaType;\n }\n > = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, ChatModelSchemaType>\n );\n }\n\n chatModel(options: C): ChatModelV1 {\n const modelName = options.modelName;\n if (!(modelName in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${modelName} not found`,\n cause: new Error(`Groq chat model: ${modelName} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[modelName].model;\n const parsedOptions = this.chatModelFactories[modelName].modelOptions.parse(options);\n return new model(parsedOptions);\n }\n\n embeddingModelLiterals(): string[] {\n 
return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, EmbeddingModelSchemaType>\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n embeddingModel(options: E): EmbeddingModelV1 {\n throw new ProviderError({\n info: \"Groq does not support embedding models yet\",\n cause: new Error(\"Groq does not support embedding models yet\"),\n });\n }\n}\n\nexport { Groq };\n"]}
|
|
1
|
+
{"version":3,"sources":["../src/models/pricing.json","../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/gemma2-9b-it.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-90b-vision.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["pricing_default","BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","hasSystemRole","msg","hasImageModality","content","InvalidMessagesError","transformedMessages","message","ModelResponseError","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Gemma2_9b_ITLiteral","Gemma2_9b_ITDescription","Gemma2_9b_ITSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Gemma2_9b_ITOptions","Gemma2_9b_IT","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_90b_VisionLiteral","Llama_3_2_90b_VisionDescription","Llama_3_2_90b_VisionSchema","Llama_3_2_90b_VisionOptions","Llama_3_2_90b_Vision","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","ProviderLiteral","acc","key","modelName","ProviderError","model"],"mappings":";;;;AAAA,IAAAA,CAAAA,CAAA,CACE,cAAA,CAAgB,CACd,SAAA,CAAa,eACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CACb,CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,GACxB,qBAAyB,CAAA,EAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,sBAAwB,CAAA,CACtB,UAAa,sBACb,CAAA,QAAA,CAAY,MACZ,WAAe,CAAA,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,8BAAA,CAAgC,CAC9B,SAAa,CAAA,8BAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,CACb,CAAA,SAAA,CAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,sBAAA,CAAwB,CACtB,SAAa,CAAA,sBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,EACb,SAAa,CAAA,IAAA,CACb,OAAU,CACR,IAAA,CAAQ,CACN,oBAAwB,CAAA,GAAA,CACxB,sBAAyB,GAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,uBAAwB,CACtB,SAAA,CAAa,uBACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CACb,
CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,IACxB,qBAAyB,CAAA,GAC3B,CACF,CACF,CACF,CACF,EACA,8BAAgC,CAAA,CAC9B,UAAa,8BACb,CAAA,QAAA,CAAY,MACZ,WAAe,CAAA,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,EACxB,CAAA,qBAAA,CAAyB,EAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,iBAAA,CAAmB,CACjB,SAAa,CAAA,iBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAa,CAAA,CAAA,CACb,UAAa,IACb,CAAA,MAAA,CAAU,CACR,IAAQ,CAAA,CACN,qBAAwB,GACxB,CAAA,qBAAA,CAAyB,GAC3B,CACF,CACF,CACF,CACF,CACA,CAAA,gBAAA,CAAkB,CAChB,SAAa,CAAA,gBAAA,CACb,SAAY,KACZ,CAAA,WAAA,CAAe,CACb,CACE,SAAA,CAAa,EACb,SAAa,CAAA,IAAA,CACb,OAAU,CACR,IAAA,CAAQ,CACN,oBAAwB,CAAA,GAAA,CACxB,qBAAyB,CAAA,GAC3B,CACF,CACF,CACF,CACF,CAAA,CACA,mBAAoB,CAClB,SAAA,CAAa,mBACb,QAAY,CAAA,KAAA,CACZ,YAAe,CACb,CACE,UAAa,CACb,CAAA,SAAA,CAAa,KACb,MAAU,CAAA,CACR,KAAQ,CACN,oBAAA,CAAwB,EACxB,CAAA,qBAAA,CAAyB,EAC3B,CACF,CACF,CACF,CACF,CACF,CCxIA,CAAA,IAAMC,EAAuBC,GAAE,CAAA,MAAA,CAAO,CACpC,SAAWA,CAAAA,GAAAA,CAAE,QAAS,CAAA,GAAA,CAAI,CAAC,CAC3B,CAAA,MAAA,CAAQA,IAAE,MAAO,EAAA,CAAE,GAAI,CAAA,CAAC,CAC1B,CAAC,EAGKC,CAAN,CAAA,cAAgCC,aAAc,CAO5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,CAAqB,CAAA,KAAA,CAAMK,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,EAAK,OACrB,CAAA,KAAA,CAAMJ,EAAa,CACjB,SAAA,CAAWE,CAAc,CAAA,SAAA,CACzB,MAAQA,CAAAA,CAAAA,CAAc,OACtB,OAASC,CAAAA,CAAAA,CACT,gBAAiB,CAAGA,EAAAA,CAAO,oBAC3B,aAAe,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAC,EAfH,IAAS,CAAA,OAAA,CAAU,KAgBjB,IAAK,CAAA,WAAA,CAAcH,EACnB,IAAK,CAAA,SAAA,CAAYE,CAAc,CAAA,SAAA,CAC/B,IAAK,CAAA,UAAA,CAAaA,EAAc,OAClC,CAEA,kBAAkBG,CAAqC,CAAA,CACrD,IAAMC,CAAgBD,CAAAA,CAAAA,CAAS,IAAME,CAAAA,CAAAA,EAAQA,CAAI,CAAA,IAAA,GAAS,QAAQ,CAC5DC,CAAAA,CAAAA,CAAmBH,EAAS,IAAME,CAAAA,CAAAA,EAAQA,EAAI,OAAQ,CAAA,IAAA,CAAME,CAAiBA,EAAAA,CAAAA,CAAQ,QAAa,GAAA,OAAO,CAAC,CAEhH,CAAA,GAAIH,GAAiBE,CACnB,CAAA,MAAM,IAAIE,oBAAqB,CAAA,CAC7B,KAAM,CAAwC,qCAAA,EAAA,IAAA,CAAK,SAAS,CAC5D,CAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAM,CAAA,8DAA8D,CACjF,CAAC,CAAA,CAGH,IAAMC,CAAAA,CAAsB,KAAM,CAAA,iBAAA,CAAkBN,CAAQ,CAI5D,CAAA,OAAAM,EAAoB,QAAS,CAAA,OAAA,CAASC,GAAY,CAC5CA,CAAAA,CAAQ,IAAS,GAAA,QAAA,CACf,OAAOA,CAAAA,CAAQ,SAAY,QAC7BA,GAAAA,CAAAA,CAAQ,QAAUA,CAAQ,CAAA,OAAA,CAAQ,IAAKH,CAAYA,EAAAA,CAAAA,CAAQ,IAAI,CAAA,CAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnEG,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKH,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEME,CAAAA,CACT,CACA,eAAsC,EAAA,CAEpC,GAAI,EAAE,IAAK,CAAA,SAAA,IAAahB,CACtB,CAAA,CAAA,MAAM,IAAIkB,kBAAmB,CAAA,CAC3B,IAAM,CAAA,CAAA,mCAAA,EAAsC,IAAK,CAAA,SAAS,CAC1D,CAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAM,CAAA,CAAA,0CAAA,EAA6C,IAAK,CAAA,SAAS,GAAG,CACjF,CAAC,CAIH,CAAA,OADclB,EAAY,IAAK,CAAA,SAAqC,CAEtE,CACF,EC5EA,IAAMmB,CAAcC,CAAAA,eAAAA,CAAgB,CAClC,KAAO,CAAA,aAAA,CACP,KAAOC,CAAAA,WAAAA,CAAY,WAAY,CAAA,KAAA,CAC/B,WAAaA,CAAAA,WAAAA,CAAY,YAAY,WACrC,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,CAAA,CAEKC,CAAaC,CAAAA,CAAAA,EACjBH,eAAgB,CAAA,CACd,KAAO,CAAA,YAAA,CACP,MAAOC,WAAY,CAAA,UAAA,CAAW,KAC9B,CAAA,WAAA,CAAaA,YAAY,UAAW,CAAA,WAAA,CACpC,GAAK,CAAA,CAAA,CACL,IAAKE,CACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAEGC,CAAAA,CAAAA,CAAOC,sBAAsB,CACjC,KAAA,CAAO,MACP,CAAA,KAAA,CAAOJ,YAAY,IAAK,CAAA,CAAC,CAAE,CAAA,KAAA,CAC3B,YAAaA,WAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,EAEKK,CAAON,CAAAA,eAAAA,CAAgB,CAC3B,KAAA,CAAO,QACP,KAAOC,CAAAA,WAAAA,CAAY,KAAM,CAAA,KAAA,CACzB,YAAaA,WAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CA
CX,CAAC,CAAA,CAEKM,CAAmBP,CAAAA,eAAAA,CAAgB,CACvC,KAAA,CAAO,oBACP,KAAOC,CAAAA,WAAAA,CAAY,iBAAkB,CAAA,KAAA,CACrC,WAAaA,CAAAA,WAAAA,CAAY,iBAAkB,CAAA,WAAA,CAC3C,IAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKO,EAAkBR,eAAgB,CAAA,CACtC,KAAO,CAAA,kBAAA,CACP,KAAOC,CAAAA,WAAAA,CAAY,gBAAiB,CAAA,KAAA,CACpC,YAAaA,WAAY,CAAA,gBAAA,CAAiB,WAC1C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKQ,CAAOT,CAAAA,eAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,KAAOC,CAAAA,WAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,YAAaA,WAAY,CAAA,IAAA,CAAK,WAC9B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,GAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKS,EAAiBC,sBAAuB,CAAA,CAC5C,KAAO,CAAA,iBAAA,CACP,KAAOV,CAAAA,WAAAA,CAAY,eAAgB,CAAA,KAAA,CACnC,YAAaA,WAAY,CAAA,eAAA,CAAgB,WACzC,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,uBAAuB,CACxC,KAAA,CAAO,aACP,CAAA,KAAA,CAAO,cACP,WACE,CAAA,+LAAA,CAGF,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,MAAQ,CAAA,UAAA,CAAY,MAAM,CACtC,CAAC,CCzED,CAAA,IAAME,EAA6BV,CACjCrB,EAAAA,GAAAA,CAAE,MAAO,CAAA,CACP,YAAaiB,CAAY,CAAA,MAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,MACtC,CAAA,IAAA,CAAMC,EAAK,MACX,CAAA,IAAA,CAAME,CAAK,CAAA,MAAA,CACX,iBAAkBC,CAAiB,CAAA,MAAA,CACnC,eAAiBC,CAAAA,CAAAA,CAAgB,OACjC,IAAMC,CAAAA,CAAAA,CAAK,MAAO,CAAA,SAAA,CAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,MACzB,CAAC,EAEGG,EAA0BZ,CAAAA,CAAAA,GAC7B,CACC,WAAA,CAAaJ,CAAY,CAAA,GAAA,CACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAE,CAAA,GAAA,CACtC,IAAMC,CAAAA,CAAAA,CAAK,IACX,IAAME,CAAAA,CAAAA,CAAK,GACX,CAAA,gBAAA,CAAkBC,EAAiB,GACnC,CAAA,eAAA,CAAiBC,CAAgB,CAAA,GAAA,CACjC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,cAAA,CAAgBC,EAAe,GAC/B,CAAA,UAAA,CAAYE,CAAW,CAAA,GACzB,CCpCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,KAAOb,CAA6B,GAAA,CAClC,GAAKY,CAAAA,EAAAA,CAAuBZ,CAAe,CAAA,CAC3C,MAAQU,CAAAA,CAAAA,CAA0BV,CAAe,CACnD,CAAA,CACF,CCOA,CAAA,IAAMc,EAAsB,cAEtBC,CAAAA,EAAAA,CACJ,6JAGIC,CAAAA,CAAAA,CAAqBC,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,KAAOK,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,EACA,KAAOpC,CAAAA,CAAAA,CAAYqC,CAAmB,CACxC,CAAC,CAEKQ,CAAAA,EAAAA,CAAsB5C,CAGtB6C,CAAAA,CAAAA,CAAN,cAA2B3C,CAAkB,CAC3C,WAAA,CAAYG,EAAkC,CAC5C,KAAA,CAAMiC,CAAoBjC,CAAAA,CAAO,EACnC,CACF,EC3BMyC,IAAAA,CAAAA,CAAsB,uBAEtBC,EACJ,CAAA,6NAAA,CAGIC,CAAqBT,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC5G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOL,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAY+C,CAAAA,CAAmB,CACxC,CAAC,EAEKG,EAAuBjD,CAAAA,CAAAA,CAGvBkD,CAAN,CAAA,cAA2BhD,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAM2C,CAAAA,CAAAA,CAAoB3C,CAAO,EACnC,CACF,EChCA,IAAM8C,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6Bd,eAAgBC,CAAAA,oBAAAA,CAAsBc,6BAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMH,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,eAAiB,CAAA,IAAA,CACjB,KAAOV,CAAAA,uBAAAA,CACP,UAAYa,CAAAA,yBAAAA,CACZ,MAAQ,CAAA,CACN,IAAKpB,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACA,CAAA,KAAA,CAAOpC,EAAYoD,CAA2B,CAChD,CAAC,CAAA,CAEKK,GAA8BxD,CAG9ByD,CAAAA,CAAAA,CAAN,cAAmCvD,CAAkB,CACnD,WA
AA,CAAYG,CAA0C,CAAA,CACpD,MAAMgD,CAA4BhD,CAAAA,CAAO,EAC3C,CACF,ECvBMqD,IAAAA,CAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,qQAGIC,CAAqBrB,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMiB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOjB,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CAAA,CACA,KAAOpC,CAAAA,CAAAA,CAAY2D,CAAmB,CACxC,CAAC,CAEKG,CAAAA,EAAAA,CAAuB7D,EAGvB8D,CAAN,CAAA,cAA2B5D,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAMuD,EAAoBvD,CAAO,EACnC,CACF,EC3BM0D,IAAAA,CAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,qQAGIC,CAAqB1B,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMsB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOtB,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CAAA,CACA,KAAOpC,CAAAA,CAAAA,CAAYgE,CAAmB,CACxC,CAAC,CAEKG,CAAAA,EAAAA,CAAuBlE,EAGvBmE,CAAN,CAAA,cAA2BjE,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM4D,EAAoB5D,CAAO,EACnC,CACF,MChCM+D,CAA8B,CAAA,8BAAA,CAE9BC,EACJ,CAAA,wSAAA,CAIIC,EAA6B/B,eAAgBC,CAAAA,oBAAAA,CAAsBc,6BAA6B,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAMc,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,CAAA,eAAA,CAAiB,KACjB,KAAO3B,CAAAA,uBAAAA,CACP,UAAYa,CAAAA,yBAAAA,CACZ,MAAQ,CAAA,CACN,GAAKpB,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACA,CAAA,KAAA,CAAOpC,CAAYqE,CAAAA,CAA2B,CAChD,CAAC,CAAA,CAEKG,EAA8BvE,CAAAA,CAAAA,CAG9BwE,EAAN,cAAmCtE,CAAkB,CACnD,WAAA,CAAYG,CAA0C,CAAA,CACpD,KAAMiE,CAAAA,CAAAA,CAA4BjE,CAAO,EAC3C,CACF,ECvBA,IAAMoE,EAAqB,iBAErBC,CAAAA,EAAAA,CACJ,yKAGIC,CAAAA,CAAAA,CAAoBpC,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMgC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOhC,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,EACA,KAAOpC,CAAAA,CAAAA,CAAY0E,CAAkB,CACvC,CAAC,CAEKG,CAAAA,EAAAA,CAAqB5E,CAGrB6E,CAAAA,CAAAA,CAAN,cAA0B3E,CAAkB,CAC1C,WAAYG,CAAAA,CAAAA,CAAiC,CAC3C,KAAA,CAAMsE,CAAmBtE,CAAAA,CAAO,EAClC,CACF,EC3BMyE,IAAAA,CAAAA,CAAoB,iBAEpBC,EACJ,CAAA,yKAAA,CAGIC,CAAmBzC,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOrC,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAY+E,CAAAA,CAAiB,CACtC,CAAC,EAEKG,EAAoBjF,CAAAA,CAAAA,CAGpBkF,CAAN,CAAA,cAAyBhF,CAAkB,CACzC,WAAA,CAAYG,CAAgC,CAAA,CAC1C,KAAM2E,CAAAA,CAAAA,CAAkB3E,CAAO,EACjC,CACF,EC3BM8E,IAAAA,CAAAA,CAAyB,mBAEzBC,EAA6B,CAAA,iGAAA,CAE7BC,CAAwB9C,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC/G,KAAM0C,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CAAA,CACA,MAAOpC,CAAYoF,CAAAA,CAAsB,CAC3C,CAAC,EAEKG,EAAyBtF,CAAAA,CAAAA,CAGzBuF,CAAN,CAAA,cAA8BrF,CAAkB,CAC9C,WAAA,CAAYG,CAAqC,CAAA,CAC/C,KAAMgF,CAAAA,CAAAA,CAAuBhF,CAAO,EACtC,CACF,ECjCA,IAAMmF,EAAkB,CAAA,MAAA,CAClBhF,EAAN,KAAuI,CAAvI,WACE,EAAA,CAAA,IAAA,CAAS,QAAU,IACnB,CAAA,IAAA,CAAS,IAAOgF,CAAAA,EAAAA,CAGhB,
IAAiB,CAAA,kBAAA,CAOb,CACF,CAAQpD,CAAmB,EAAG,CAC5B,KAAcS,CAAAA,CAAAA,CACd,aAAqBD,EACrB,CAAA,WAAA,CAAoBN,CACtB,CAAA,CACA,CAAQ6C,CAAsB,EAAG,CAC/B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAiB,EAAG,CAC1B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBD,CAAAA,CACtB,CACA,CAAA,CAAQP,CAAkB,EAAG,CAC3B,KAAA,CAAcI,EACd,YAAqBD,CAAAA,EAAAA,CACrB,WAAoBD,CAAAA,CACtB,EACA,CAAQ7B,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CAAA,CACA,CAAQG,CAA2B,EAAG,CACpC,KAAA,CAAcM,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBH,CAAAA,CACtB,CACA,CAAA,CAAQe,CAA2B,EAAG,CACpC,KAAcI,CAAAA,CAAAA,CACd,YAAqBD,CAAAA,EAAAA,CACrB,WAAoBD,CAAAA,CACtB,EACA,CAAQP,CAAmB,EAAG,CAC5B,MAAcI,CACd,CAAA,YAAA,CAAqBD,EACrB,CAAA,WAAA,CAAoBD,CACtB,CACA,CAAA,CAAQP,CAAmB,EAAG,CAC5B,KAAA,CAAcI,CACd,CAAA,YAAA,CAAqBD,GACrB,WAAoBD,CAAAA,CACtB,CACF,CAAA,CAEA,IAAiB,CAAA,uBAAA,CAOb,GAAC,CAEL,mBAA8B,CAC5B,OAAO,MAAO,CAAA,IAAA,CAAK,IAAK,CAAA,kBAAkB,CAC5C,CAEA,kBAAwD,CACtD,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,kBAAkB,CAAA,CAAE,MAC1C,CAAA,CAAC6B,EAAKC,CACJD,IAAAA,CAAAA,CAAIC,CAAG,CAAA,CAAI,IAAK,CAAA,kBAAA,CAAmBA,CAAG,CAAA,CAAE,YACjCD,CAET,CAAA,CAAA,EACF,CACF,CAEA,SAAUpF,CAAAA,CAAAA,CAAyB,CACjC,IAAMsF,EAAYtF,CAAQ,CAAA,SAAA,CAC1B,GAAI,EAAEsF,CAAa,IAAA,IAAA,CAAK,kBACtB,CAAA,CAAA,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAS,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAS,CAAA;AAAA,UAAA,EAC1C,KAAK,iBAAkB,EAAA,CAAE,KAAK,IAAI,CAAC,EAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,EAAQ,IAAK,CAAA,kBAAA,CAAmBF,CAAS,CAAE,CAAA,KAAA,CAC3CrF,EAAgB,IAAK,CAAA,kBAAA,CAAmBqF,CAAS,CAAA,CAAE,aAAa,KAAMtF,CAAAA,CAAO,EACnF,OAAO,IAAIwF,EAAMvF,CAAa,CAChC,CAEA,sBAAA,EAAmC,CACjC,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,uBAAuB,CACjD,CAEA,qBAAA,EAAkE,CAChE,OAAO,OAAO,IAAK,CAAA,IAAA,CAAK,uBAAuB,CAAE,CAAA,MAAA,CAC/C,CAACmF,CAAKC,CAAAA,CAAAA,IACJD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GAET,EACF,CACF,CAGA,eAAepF,CAA8B,CAAA,CAC3C,MAAM,IAAIuF,aAAAA,CAAc,CACtB,IAAM,CAAA,4CAAA,CACN,MAAO,IAAI,KAAA,CAAM,4CAA4C,CAC/D,CAAC,CACH,CACF,EAvHMpF,EAGY,OAAU,CAAA,gCAAA","file":"index.mjs","sourcesContent":["{\n \"gemma2-9b-it\": {\n \"modelName\": \"gemma2-9b-it\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.2,\n \"outputPricePerMillion\": 0.2\n }\n }\n }\n ]\n },\n \"llama-3.1-8b-instant\": {\n \"modelName\": \"llama-3.1-8b-instant\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.05,\n \"outputPricePerMillion\": 0.08\n }\n }\n }\n ]\n },\n \"llama-3.2-11b-vision-preview\": {\n \"modelName\": \"llama-3.2-11b-vision-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.18,\n \"outputPricePerMillion\": 0.18\n }\n }\n }\n ]\n },\n \"llama-3.2-1b-preview\": {\n \"modelName\": \"llama-3.2-1b-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.04,\n \"outputPricePerMillion\": 0.04\n }\n }\n }\n ]\n },\n \"llama-3.2-3b-preview\": {\n \"modelName\": \"llama-3.2-3b-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.06,\n \"outputPricePerMillion\": 0.06\n }\n }\n }\n ]\n },\n 
\"llama-3.2-90b-vision-preview\": {\n \"modelName\": \"llama-3.2-90b-vision-preview\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.7,\n \"outputPricePerMillion\": 0.8\n }\n }\n }\n ]\n },\n \"llama3-70b-8192\": {\n \"modelName\": \"llama3-70b-8192\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.59,\n \"outputPricePerMillion\": 0.79\n }\n }\n }\n ]\n },\n \"llama3-8b-8192\": {\n \"modelName\": \"llama3-8b-8192\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.05,\n \"outputPricePerMillion\": 0.08\n }\n }\n }\n ]\n },\n \"llama-guard-3-8b\": {\n \"modelName\": \"llama-guard-3-8b\",\n \"currency\": \"USD\",\n \"tokenRanges\": [\n {\n \"minTokens\": 0,\n \"maxTokens\": null,\n \"prices\": {\n \"base\": {\n \"inputPricePerMillion\": 0.2,\n \"outputPricePerMillion\": 0.2\n }\n }\n }\n ]\n }\n}\n","import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, InvalidMessagesError, ModelResponseError, ParamsType } from \"@adaline/provider\";\nimport { ChatModelPriceType, MessageType } from \"@adaline/types\";\n\nimport { Groq } from \"../../provider\";\nimport pricingData from \"../pricing.json\";\n\nconst BaseChatModelOptions = z.object({\n modelName: z.string().min(1),\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n readonly modelName: string;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n modelName: parsedOptions.modelName,\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.modelName = parsedOptions.modelName;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const hasSystemRole = messages.some((msg) => msg.role === \"system\");\n const hasImageModality = messages.some((msg) => msg.content.some((content: any) => content.modality === \"image\"));\n\n if (hasSystemRole && hasImageModality) {\n throw new InvalidMessagesError({\n info: `Invalid message content for model : '${this.modelName}'`,\n cause: new Error(\"Prompting with images is incompatible with system messages`)\"),\n });\n }\n\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality\n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = message.content.map((content) 
=> content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n getModelPricing(): ChatModelPriceType {\n // Check if the modelName exists in pricingData before accessing it\n if (!(this.modelName in pricingData)) {\n throw new ModelResponseError({\n info: `Invalid model pricing for model : '${this.modelName}'`,\n cause: new Error(`No pricing configuration found for model \"${this.modelName}\"`),\n });\n }\n\n const entry = pricingData[this.modelName as keyof typeof pricingData];\n return entry as ChatModelPriceType;\n }\n}\n\nexport { BaseChatModelGroq, BaseChatModelOptions, type BaseChatModelOptionsType };\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n});\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { frequencyPenalty, maxTokens, presencePenalty, seed, stop, temperature, toolChoice, topP, responseFormat };\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n responseFormat,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? 
undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import { ChatModelBaseConfigDef, ChatModelBaseConfigSchema } from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Gemma2_9b_ITLiteral = \"gemma2-9b-it\" as const;\n// https://huggingface.co/google/gemma-2-9b-it\nconst Gemma2_9b_ITDescription =\n \"Gemma is a family of lightweight, state-of-the-art open models from Google, \\\n built from the same research and technology used to create the Gemini models.\";\n\nconst Gemma2_9b_ITSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Gemma2_9b_ITLiteral,\n description: Gemma2_9b_ITDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Gemma2_9b_ITLiteral],\n});\n\nconst Gemma2_9b_ITOptions = BaseChatModelOptions;\ntype Gemma2_9b_ITOptionsType = z.infer<typeof Gemma2_9b_ITOptions>;\n\nclass Gemma2_9b_IT extends BaseChatModelGroq {\n constructor(options: Gemma2_9b_ITOptionsType) {\n super(Gemma2_9b_ITSchema, options);\n }\n}\n\nexport { Gemma2_9b_IT, Gemma2_9b_ITLiteral, Gemma2_9b_ITOptions, Gemma2_9b_ITSchema, type Gemma2_9b_ITOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription =\n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 
8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_1_8bLiteral],\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport { Llama_3_1_8b, Llama_3_1_8b_Options, Llama_3_1_8bLiteral, Llama_3_1_8bSchema, type Llama_3_1_8b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription =\n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. \\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_11b_VisionLiteral],\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n type Llama_3_2_11b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_1bLiteral],\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport { Llama_3_2_1b, Llama_3_2_1b_Options, Llama_3_2_1bLiteral, Llama_3_2_1bSchema, type Llama_3_2_1b_OptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription =\n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_3bLiteral],\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport { Llama_3_2_3b, Llama_3_2_3b_Options, Llama_3_2_3bLiteral, Llama_3_2_3bSchema, type Llama_3_2_3b_OptionsType };\n","import { z } from \"zod\";\n\nimport { OpenAIChatModelModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelRoles, OpenAIChatModelRolesMap } from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_90b_VisionLiteral = \"llama-3.2-90b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-90B-Vision\nconst Llama_3_2_90b_VisionDescription =\n \"The Llama 3.2-90B Vision instruction-tuned models are optimized for advanced visual recognition, \\\n complex image reasoning, detailed captioning, and answering intricate questions about images. 
\\\n These models achieve state-of-the-art results on multiple industry benchmarks for multimodal tasks.\";\n\nconst Llama_3_2_90b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_90b_VisionLiteral,\n description: Llama_3_2_90b_VisionDescription,\n maxInputTokens: 131072,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n price: pricingData[Llama_3_2_90b_VisionLiteral],\n});\n\nconst Llama_3_2_90b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_90b_VisionOptionsType = z.infer<typeof Llama_3_2_90b_VisionOptions>;\n\nclass Llama_3_2_90b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_90b_VisionOptionsType) {\n super(Llama_3_2_90b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_90b_Vision,\n Llama_3_2_90b_VisionLiteral,\n Llama_3_2_90b_VisionOptions,\n Llama_3_2_90b_VisionSchema,\n type Llama_3_2_90b_VisionOptionsType,\n};\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Llama_3_70bLiteral],\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport { Llama_3_70b, Llama_3_70bLiteral, Llama_3_70bOptions, Llama_3_70bSchema, type Llama_3_70bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription =\n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = 
ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[Llama_3_8bLiteral],\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport { Llama_3_8b, Llama_3_8bLiteral, Llama_3_8bOptions, Llama_3_8bSchema, type Llama_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport {\n OpenAIChatModelRoles,\n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport pricingData from \"../pricing.json\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n price: pricingData[LlamaGuard_3_8bLiteral],\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport { LlamaGuard_3_8b, LlamaGuard_3_8bLiteral, LlamaGuard_3_8bOptions, LlamaGuard_3_8bSchema, type LlamaGuard_3_8bOptionsType };\n","import { z } from \"zod\";\n\nimport { ChatModelSchemaType, ChatModelV1, EmbeddingModelSchemaType, EmbeddingModelV1, ProviderError, ProviderV1 } from \"@adaline/provider\";\n\nimport * as Models from \"../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<C extends Models.BaseChatModelOptionsType, E extends Record<string, any> = Record<string, any>> implements ProviderV1<C, E> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: Record<\n string,\n {\n model: { new (options: any): ChatModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: ChatModelSchemaType;\n }\n > = {\n [Models.Gemma2_9b_ITLiteral]: {\n model: Models.Gemma2_9b_IT,\n modelOptions: Models.Gemma2_9b_ITOptions,\n modelSchema: Models.Gemma2_9b_ITSchema,\n },\n [Models.LlamaGuard_3_8bLiteral]: {\n model: Models.LlamaGuard_3_8b,\n modelOptions: Models.LlamaGuard_3_8bOptions,\n modelSchema: Models.LlamaGuard_3_8bSchema,\n },\n [Models.Llama_3_8bLiteral]: {\n model: Models.Llama_3_8b,\n modelOptions: 
Models.Llama_3_8bOptions,\n modelSchema: Models.Llama_3_8bSchema,\n },\n [Models.Llama_3_70bLiteral]: {\n model: Models.Llama_3_70b,\n modelOptions: Models.Llama_3_70bOptions,\n modelSchema: Models.Llama_3_70bSchema,\n },\n [Models.Llama_3_1_8bLiteral]: {\n model: Models.Llama_3_1_8b,\n modelOptions: Models.Llama_3_1_8b_Options,\n modelSchema: Models.Llama_3_1_8bSchema,\n },\n [Models.Llama_3_2_11b_VisionLiteral]: {\n model: Models.Llama_3_2_11b_Vision,\n modelOptions: Models.Llama_3_2_11b_VisionOptions,\n modelSchema: Models.Llama_3_2_11b_VisionSchema,\n },\n [Models.Llama_3_2_90b_VisionLiteral]: {\n model: Models.Llama_3_2_90b_Vision,\n modelOptions: Models.Llama_3_2_90b_VisionOptions,\n modelSchema: Models.Llama_3_2_90b_VisionSchema,\n },\n [Models.Llama_3_2_3bLiteral]: {\n model: Models.Llama_3_2_3b,\n modelOptions: Models.Llama_3_2_3b_Options,\n modelSchema: Models.Llama_3_2_3bSchema,\n },\n [Models.Llama_3_2_1bLiteral]: {\n model: Models.Llama_3_2_1b,\n modelOptions: Models.Llama_3_2_1b_Options,\n modelSchema: Models.Llama_3_2_1bSchema,\n },\n };\n\n private readonly embeddingModelFactories: Record<\n string,\n {\n model: { new (options: any): EmbeddingModelV1 };\n modelOptions: z.ZodType<any>;\n modelSchema: EmbeddingModelSchemaType;\n }\n > = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, ChatModelSchemaType>\n );\n }\n\n chatModel(options: C): ChatModelV1 {\n const modelName = options.modelName;\n if (!(modelName in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${modelName} not found`,\n cause: new Error(`Groq chat model: ${modelName} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[modelName].model;\n const parsedOptions = this.chatModelFactories[modelName].modelOptions.parse(options);\n return new model(parsedOptions);\n }\n\n embeddingModelLiterals(): string[] {\n return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce(\n (acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n },\n {} as Record<string, EmbeddingModelSchemaType>\n );\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n embeddingModel(options: E): EmbeddingModelV1 {\n throw new ProviderError({\n info: \"Groq does not support embedding models yet\",\n cause: new Error(\"Groq does not support embedding models yet\"),\n });\n }\n}\n\nexport { Groq };\n"]}
|
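One change worth surfacing from the bundled sources above: 1.0.0 adds a `getModelPricing()` method to `BaseChatModelGroq`, resolved from the embedded pricing.json. A minimal consumer-side sketch follows; `Llama_3_1_8b`, its options shape, and the pricing figures come from the sources shown here, while the root re-export, the placeholder API key, `estimateCostUSD`, and the token counts are illustrative assumptions rather than part of the package.

```ts
// Minimal sketch (not from the package): reading the per-model pricing added in 1.0.0.
// Llama_3_1_8b and getModelPricing() appear in the bundled sources above; the helper
// below, the API-key placeholder, and the token counts are hypothetical.
import { Llama_3_1_8b } from "@adaline/groq";

const model = new Llama_3_1_8b({ modelName: "llama-3.1-8b-instant", apiKey: "<GROQ_API_KEY>" });

const pricing = model.getModelPricing();
// => { modelName: "llama-3.1-8b-instant", currency: "USD",
//      tokenRanges: [{ minTokens: 0, maxTokens: null,
//        prices: { base: { inputPricePerMillion: 0.05, outputPricePerMillion: 0.08 } } }] }

// Hypothetical helper: estimate a single request's cost from token counts, using the
// one open-ended token range that every Groq entry in the bundled pricing table defines.
function estimateCostUSD(inputTokens: number, outputTokens: number): number {
  const base = pricing.tokenRanges[0].prices.base;
  return (
    (inputTokens / 1_000_000) * base.inputPricePerMillion +
    (outputTokens / 1_000_000) * base.outputPricePerMillion
  );
}

console.log(estimateCostUSD(12_000, 800)); // 0.000664 USD for llama-3.1-8b-instant
```

Since every entry in the bundled pricing.json uses a single range (`minTokens: 0`, `maxTokens: null`), the single-range lookup above covers all models listed in this release.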
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 1  1 |  {
 2  2 |    "name": "@adaline/groq",
 3    | -  "version": "0.
    3 | +  "version": "1.0.0",
 4  4 |    "license": "MIT",
 5  5 |    "sideEffects": false,
 6  6 |    "private": false,
@@ -46,9 +46,9 @@
46 46 |    ],
47 47 |    "dependencies": {
48 48 |      "zod": "^3.23.8",
49    | -    "@adaline/provider": "0.
50    | -    "@adaline/types": "0.
51    | -    "@adaline/openai": "0.
   49 | +    "@adaline/provider": "1.0.0",
   50 | +    "@adaline/types": "1.0.0",
   51 | +    "@adaline/openai": "1.0.0"
52 52 |    },
53 53 |    "devDependencies": {
54 54 |      "@adaline/tsconfig": "0.11.0",
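With the workspace dependencies above moving to 1.0.0 in lockstep, the provider surface shown in the bundled sources can be exercised as below. This is a hedged sketch: it assumes `Groq` and `BaseChatModelOptionsType` are re-exported from the package root (as the bundled declarations suggest), and the unknown model name exists only to illustrate the `ProviderError` path in the provider source.

```ts
// Sketch, not package documentation: list the Groq chat models registered in 1.0.0
// and show the error path for an unknown model name.
import { Groq, type BaseChatModelOptionsType } from "@adaline/groq";

const provider = new Groq<BaseChatModelOptionsType>();

// Nine chat models are registered in this release (gemma2-9b-it, the Llama 3.x family,
// and llama-guard-3-8b); embedding models are not supported yet.
console.log(provider.chatModelLiterals());

try {
  // Hypothetical model name, used only to trigger the ProviderError branch.
  provider.chatModel({ modelName: "not-a-groq-model", apiKey: "<GROQ_API_KEY>" });
} catch (err) {
  console.error(err); // ProviderError: "Groq chat model: not-a-groq-model not found"
}
```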