@adaline/groq 0.2.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +3 -1
- package/dist/index.d.ts +3 -1
- package/dist/index.js +21 -19
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +8 -6
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.d.mts
CHANGED
@@ -1,6 +1,7 @@
-import { ProviderV1, ChatModelV1, ChatModelSchemaType, EmbeddingModelV1, EmbeddingModelSchemaType } from '@adaline/provider';
+import { ProviderV1, ChatModelV1, ChatModelSchemaType, EmbeddingModelV1, EmbeddingModelSchemaType, ParamsType } from '@adaline/provider';
 import { z } from 'zod';
 import { BaseChatModel } from '@adaline/openai';
+import { MessageType } from '@adaline/types';
 
 declare class Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {
     readonly version: "v1";
@@ -31,6 +32,7 @@ declare class BaseChatModelGroq extends BaseChatModel {
     modelSchema: ChatModelSchemaType;
     private readonly groqApiKey;
     constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType);
+    transformMessages(messages: MessageType[]): ParamsType;
 }
 
 declare const Mixtral_8x7bLiteral: "mixtral-8x7b-32768";
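The declaration change above is the whole public-API delta in 0.3.x: `BaseChatModelGroq` now exposes a `transformMessages(messages: MessageType[]): ParamsType` override, with `ParamsType` coming from `@adaline/provider` and `MessageType` from `@adaline/types`. A minimal consumer-side sketch of the new surface (assuming an installed `@adaline/groq@0.3.1`; the placeholder API key and the `messages` value are hypothetical, and the exact `MessageType` shape lives in `@adaline/types`, not in this diff):

```ts
import { BaseChatModelGroq, Llama_3_1_8bSchema } from "@adaline/groq";
import type { MessageType } from "@adaline/types";

// Hypothetical input; the concrete MessageType shape is defined in @adaline/types.
declare const messages: MessageType[];

// Constructor signature is unchanged: (modelSchema, { apiKey }).
const model = new BaseChatModelGroq(Llama_3_1_8bSchema, { apiKey: "YOUR_GROQ_API_KEY" });

// New in 0.3.x: the Groq chat model publicly exposes transformMessages,
// returning provider-ready request params (ParamsType) built from Adaline messages.
const params = model.transformMessages(messages);
```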
package/dist/index.d.ts
CHANGED
@@ -1,6 +1,7 @@
-import { ProviderV1, ChatModelV1, ChatModelSchemaType, EmbeddingModelV1, EmbeddingModelSchemaType } from '@adaline/provider';
+import { ProviderV1, ChatModelV1, ChatModelSchemaType, EmbeddingModelV1, EmbeddingModelSchemaType, ParamsType } from '@adaline/provider';
 import { z } from 'zod';
 import { BaseChatModel } from '@adaline/openai';
+import { MessageType } from '@adaline/types';
 
 declare class Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {
     readonly version: "v1";
@@ -31,6 +32,7 @@ declare class BaseChatModelGroq extends BaseChatModel {
     modelSchema: ChatModelSchemaType;
     private readonly groqApiKey;
     constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType);
+    transformMessages(messages: MessageType[]): ParamsType;
 }
 
 declare const Mixtral_8x7bLiteral: "mixtral-8x7b-32768";
package/dist/index.js
CHANGED
@@ -4,28 +4,30 @@ var provider = require('@adaline/provider');
 var zod = require('zod');
 var openai = require('@adaline/openai');
 
-
var Oe=Object.defineProperty;var ae=Object.getOwnPropertySymbols;var Te=Object.prototype.hasOwnProperty,Le=Object.prototype.propertyIsEnumerable;var te=(a,e,s)=>e in a?Oe(a,e,{enumerable:!0,configurable:!0,writable:!0,value:s}):a[e]=s,g=(a,e)=>{for(var s in e||(e={}))Te.call(e,s)&&te(a,s,e[s]);if(ae)for(var s of ae(e))Le.call(e,s)&&te(a,s,e[s]);return a};var i=zod.z.object({apiKey:zod.z.string().min(1)}),t=class extends openai.BaseChatModel{constructor(s,d){let m=i.parse(d),l=p.baseUrl;super(s,{apiKey:m.apiKey,baseUrl:l,completeChatUrl:`${l}/chat/completions`,streamChatUrl:`${l}/chat/completions`});this.version="v1";this.modelSchema=s,this.groqApiKey=m.apiKey;}};var x=provider.RangeConfigItem({param:"temperature",title:provider.CHAT_CONFIG.TEMPERATURE.title,description:provider.CHAT_CONFIG.TEMPERATURE.description,min:0,max:2,step:.01,default:1}),S=a=>provider.RangeConfigItem({param:"max_tokens",title:provider.CHAT_CONFIG.MAX_TOKENS.title,description:provider.CHAT_CONFIG.MAX_TOKENS.description,min:0,max:a,step:1,default:0}),I=provider.MultiStringConfigItem({param:"stop",title:provider.CHAT_CONFIG.STOP(4).title,description:provider.CHAT_CONFIG.STOP(4).description,max:4}),A=provider.RangeConfigItem({param:"top_p",title:provider.CHAT_CONFIG.TOP_P.title,description:provider.CHAT_CONFIG.TOP_P.description,min:0,max:1,step:.01,default:1}),E=provider.RangeConfigItem({param:"frequency_penalty",title:provider.CHAT_CONFIG.FREQUENCY_PENALTY.title,description:provider.CHAT_CONFIG.FREQUENCY_PENALTY.description,min:-2,max:2,step:.01,default:0}),q=provider.RangeConfigItem({param:"presence_penalty",title:provider.CHAT_CONFIG.PRESENCE_PENALTY.title,description:provider.CHAT_CONFIG.PRESENCE_PENALTY.description,min:-2,max:2,step:.01,default:0}),G=provider.RangeConfigItem({param:"seed",title:provider.CHAT_CONFIG.SEED.title,description:provider.CHAT_CONFIG.SEED.description,min:0,max:1e6,step:1,default:0}),R=provider.SelectStringConfigItem({param:"response_format",title:provider.CHAT_CONFIG.RESPONSE_FORMAT.title,description:provider.CHAT_CONFIG.RESPONSE_FORMAT.description,default:"text",choices:["text","json_object"]}),B=provider.SelectStringConfigItem({param:"tool_choice",title:"Tool choice",description:"Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 
'auto' means the model can pick between generating a message or calling a tool.",default:"auto",choices:["auto","required","none"]});var ne=a=>zod.z.object({temperature:x.schema,maxTokens:S(a).schema,stop:I.schema,topP:A.schema,frequencyPenalty:E.schema,presencePenalty:q.schema,seed:G.schema.transform(e=>e===0?void 0:e),responseFormat:R.schema,toolChoice:B.schema}),le=a=>({temperature:x.def,maxTokens:S(a).def,stop:I.def,topP:A.def,frequencyPenalty:E.def,presencePenalty:q.def,seed:G.def,responseFormat:R.def,toolChoice:B.def});var o={base:a=>({def:le(a),schema:ne(a)})};var k="mixtral-8x7b-32768",qe="The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.",v=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:k,description:qe,maxInputTokens:32768,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),me=i,c=class extends t{constructor(e){super(v,e);}};var z="llama-guard-3-8b",ze="Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",U=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:z,description:ze,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),re=i,_=class extends t{constructor(e){super(U,e);}};var P="llama3-8b-8192",De="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",F=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:P,description:De,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),de=i,h=class extends t{constructor(e){super(F,e);}};var w="llama3-70b-8192",Qe="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",V=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:w,description:Qe,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),pe=i,f=class extends t{constructor(e){super(V,e);}};var D="llama3-groq-70b-8192-tool-use-preview",eo="This is the 70B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling tasks.",$=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:D,description:eo,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),ce=i,b=class extends t{constructor(e){super($,e);}};var N="llama3-groq-8b-8192-tool-use-preview",no="This is the 8B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling 
tasks.",j=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:N,description:no,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),_e=i,u=class extends t{constructor(e){super(j,e);}};var K="llama-3.2-11b-vision-preview",_o="The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image. The models outperform many of the available open source and closed multimodal models on common industry benchmarks.",Y=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelModalitiesEnum).parse({name:K,description:_o,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),he=i,M=class extends t{constructor(e){super(Y,e);}};var Q="llama-3.2-3b-preview",Oo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",X=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:Q,description:Oo,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),fe=i,O=class extends t{constructor(e){super(X,e);}};var Z="llama-3.2-1b-preview",xo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.",H=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:Z,description:xo,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),be=i,T=class extends t{constructor(e){super(H,e);}};var J="llama-3.1-70b-versatile",Go="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",W=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:J,description:Go,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),ue=i,L=class extends t{constructor(e){super(W,e);}};var ee="llama-3.1-8b-instant",Uo="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",oe=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:ee,description:Uo,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),Me=i,y=class extends t{constructor(e){super(oe,e);}};var Po="groq",p=class{constructor(){this.version="v1";this.name=Po;this.chatModelFactories={[z]:{model:_,modelOptions:re,modelSchema:U},[P]:{model:h,modelOptions:de,modelSchema:F},[w]:{model:f,modelOptions:pe,modelSchema:V},[ee]:{model:y,modelOptions:Me,modelSchema:oe},[N]:{model:u,modelOptions:_e,modelSchema:j},[J]:{model:L,modelOptions:ue,modelSchema:W},[D]:{model:b,modelOptions:ce,modelSchema:$},[K]:{model:M,modelOptions:he,modelSchema:Y},[Q]:{model:O,modelOptions:fe,modelSchema:X},[Z]:{model:T,modelOptions:be,modelSchema:H},[k]:{model:c,modelOptions:me,modelSchema:v}};this.embeddingModelFactories={};}chatModelLiterals(){return Object.keys(this.chatModelFactories)}chatModel(e,s){if(!(e in this.chatModelFactories))throw new provider.ProviderError({info:`Groq chat model: ${e} not found`,cause:new Error(`Groq chat model: ${e} not found, available chat models:
-
-
-${this.
-${this.
+
var Oe=Object.defineProperty;var te=Object.getOwnPropertySymbols;var Te=Object.prototype.hasOwnProperty,Le=Object.prototype.propertyIsEnumerable;var ae=(t,e,a)=>e in t?Oe(t,e,{enumerable:!0,configurable:!0,writable:!0,value:a}):t[e]=a,g=(t,e)=>{for(var a in e||(e={}))Te.call(e,a)&&ae(t,a,e[a]);if(te)for(var a of te(e))Le.call(e,a)&&ae(t,a,e[a]);return t};var n=zod.z.object({apiKey:zod.z.string().min(1)}),s=class extends openai.BaseChatModel{constructor(a,r){let i=n.parse(r),m=d.baseUrl;super(a,{apiKey:i.apiKey,baseUrl:m,completeChatUrl:`${m}/chat/completions`,streamChatUrl:`${m}/chat/completions`});this.version="v1";this.modelSchema=a,this.groqApiKey=i.apiKey;}transformMessages(a){let r=super.transformMessages(a);return r.messages.forEach(i=>{i.role==="system"?typeof i.content!="string"&&(i.content=i.content.map(m=>m.text).join(`
+
`)):i.role==="assistant"&&i.content&&typeof i.content!="string"&&(i.content=i.content.map(m=>m.text).join(`
+
`));}),r}};var x=provider.RangeConfigItem({param:"temperature",title:provider.CHAT_CONFIG.TEMPERATURE.title,description:provider.CHAT_CONFIG.TEMPERATURE.description,min:0,max:2,step:.01,default:1}),S=t=>provider.RangeConfigItem({param:"max_tokens",title:provider.CHAT_CONFIG.MAX_TOKENS.title,description:provider.CHAT_CONFIG.MAX_TOKENS.description,min:0,max:t,step:1,default:0}),I=provider.MultiStringConfigItem({param:"stop",title:provider.CHAT_CONFIG.STOP(4).title,description:provider.CHAT_CONFIG.STOP(4).description,max:4}),A=provider.RangeConfigItem({param:"top_p",title:provider.CHAT_CONFIG.TOP_P.title,description:provider.CHAT_CONFIG.TOP_P.description,min:0,max:1,step:.01,default:1}),E=provider.RangeConfigItem({param:"frequency_penalty",title:provider.CHAT_CONFIG.FREQUENCY_PENALTY.title,description:provider.CHAT_CONFIG.FREQUENCY_PENALTY.description,min:-2,max:2,step:.01,default:0}),q=provider.RangeConfigItem({param:"presence_penalty",title:provider.CHAT_CONFIG.PRESENCE_PENALTY.title,description:provider.CHAT_CONFIG.PRESENCE_PENALTY.description,min:-2,max:2,step:.01,default:0}),R=provider.RangeConfigItem({param:"seed",title:provider.CHAT_CONFIG.SEED.title,description:provider.CHAT_CONFIG.SEED.description,min:0,max:1e6,step:1,default:0}),G=provider.SelectStringConfigItem({param:"response_format",title:provider.CHAT_CONFIG.RESPONSE_FORMAT.title,description:provider.CHAT_CONFIG.RESPONSE_FORMAT.description,default:"text",choices:["text","json_object"]}),B=provider.SelectStringConfigItem({param:"tool_choice",title:"Tool choice",description:"Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",default:"auto",choices:["auto","required","none"]});var ie=t=>zod.z.object({temperature:x.schema,maxTokens:S(t).schema,stop:I.schema,topP:A.schema,frequencyPenalty:E.schema,presencePenalty:q.schema,seed:R.schema.transform(e=>e===0?void 0:e),responseFormat:G.schema,toolChoice:B.schema}),le=t=>({temperature:x.def,maxTokens:S(t).def,stop:I.def,topP:A.def,frequencyPenalty:E.def,presencePenalty:q.def,seed:R.def,responseFormat:G.def,toolChoice:B.def});var o={base:t=>({def:le(t),schema:ie(t)})};var k="mixtral-8x7b-32768",qe="The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.",v=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:k,description:qe,maxInputTokens:32768,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),me=n,c=class extends s{constructor(e){super(v,e);}};var z="llama-guard-3-8b",ze="Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",P=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:z,description:ze,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),re=n,_=class extends s{constructor(e){super(P,e);}};var U="llama3-8b-8192",De="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry 
benchmarks.",F=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:U,description:De,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),pe=n,h=class extends s{constructor(e){super(F,e);}};var w="llama3-70b-8192",Qe="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",V=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:w,description:Qe,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),de=n,f=class extends s{constructor(e){super(V,e);}};var D="llama3-groq-70b-8192-tool-use-preview",eo="This is the 70B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling tasks.",j=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:D,description:eo,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),ce=n,b=class extends s{constructor(e){super(j,e);}};var $="llama3-groq-8b-8192-tool-use-preview",io="This is the 8B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling tasks.",N=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:$,description:io,maxInputTokens:8192,maxOutputTokens:4096,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),_e=n,M=class extends s{constructor(e){super(N,e);}};var K="llama-3.2-11b-vision-preview",_o="The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image. The models outperform many of the available open source and closed multimodal models on common industry benchmarks.",Y=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelModalitiesEnum).parse({name:K,description:_o,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),he=n,u=class extends s{constructor(e){super(Y,e);}};var Q="llama-3.2-3b-preview",Oo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.",X=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:Q,description:Oo,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),fe=n,O=class extends s{constructor(e){super(X,e);}};var Z="llama-3.2-1b-preview",xo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",H=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:Z,description:xo,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),be=n,T=class extends s{constructor(e){super(H,e);}};var J="llama-3.1-70b-versatile",Ro="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",W=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:J,description:Ro,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),Me=n,L=class extends s{constructor(e){super(W,e);}};var ee="llama-3.1-8b-instant",Po="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",oe=provider.ChatModelSchema(openai.OpenAIChatModelRoles,openai.OpenAIChatModelTextToolModalitiesEnum).parse({name:ee,description:Po,maxInputTokens:128e3,maxOutputTokens:8192,roles:openai.OpenAIChatModelRolesMap,modalities:openai.OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),ue=n,y=class extends s{constructor(e){super(oe,e);}};var Uo="groq",d=class{constructor(){this.version="v1";this.name=Uo;this.chatModelFactories={[z]:{model:_,modelOptions:re,modelSchema:P},[U]:{model:h,modelOptions:pe,modelSchema:F},[w]:{model:f,modelOptions:de,modelSchema:V},[ee]:{model:y,modelOptions:ue,modelSchema:oe},[$]:{model:M,modelOptions:_e,modelSchema:N},[J]:{model:L,modelOptions:Me,modelSchema:W},[D]:{model:b,modelOptions:ce,modelSchema:j},[K]:{model:u,modelOptions:he,modelSchema:Y},[Q]:{model:O,modelOptions:fe,modelSchema:X},[Z]:{model:T,modelOptions:be,modelSchema:H},[k]:{model:c,modelOptions:me,modelSchema:v}};this.embeddingModelFactories={};}chatModelLiterals(){return Object.keys(this.chatModelFactories)}chatModel(e,a){if(!(e in this.chatModelFactories))throw new provider.ProviderError({info:`Groq chat model: ${e} not found`,cause:new Error(`Groq chat model: ${e} not found, available chat models:
+
${this.chatModelLiterals().join(", ")}`)});let r=this.chatModelFactories[e].model,i=g({},a),m=this.chatModelFactories[e].modelOptions.parse(i);return new r(m)}chatModelSchema(e){if(!(e in this.chatModelFactories))throw new provider.ProviderError({info:`Groq chat model: ${e} not found`,cause:new Error(`Groq chat model: ${e} not found, available chat models:
+
${this.chatModelLiterals().join(", ")}`)});return this.chatModelFactories[e].modelSchema}chatModelSchemas(){return Object.keys(this.chatModelFactories).reduce((e,a)=>(e[a]=this.chatModelFactories[a].modelSchema,e),{})}embeddingModelLiterals(){return Object.keys(this.embeddingModelFactories)}embeddingModel(e,a){if(!(e in this.embeddingModelFactories))throw new provider.ProviderError({info:`Groq embedding model: ${e} not found`,cause:new Error(`Groq embedding model: ${e} not found, available embedding models:
+
${this.embeddingModelLiterals().join(", ")}`)});let r=this.embeddingModelFactories[e].model,i=g({},a),m=this.embeddingModelFactories[e].modelOptions.parse(i);return new r(m)}embeddingModelSchema(e){if(!(e in this.embeddingModelFactories))throw new provider.ProviderError({info:`Groq embedding model: ${e} not found`,cause:new Error(`Groq embedding model: ${e} not found, available embedding models:
+
${this.embeddingModelLiterals().join(", ")}`)});return this.embeddingModelFactories[e].modelSchema}embeddingModelSchemas(){return Object.keys(this.embeddingModelFactories).reduce((e,a)=>(e[a]=this.embeddingModelFactories[a].modelSchema,e),{})}};d.baseUrl="https://api.groq.com/openai/v1";
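The only functional change in the bundle is the new `transformMessages` override added to the Groq base chat model (the `transformMessages(a){...}` block near the start of the added line above); the rest of the diff is the bundler renaming minified locals (`t`→`s`, `i`→`n`, `p`→`d`, and so on). Reconstructed with readable identifiers, the added logic looks roughly like this (a de-minified sketch, not the original source; the names are mine, and the precise `ParamsType`/`MessageType` shapes come from `@adaline/provider` and `@adaline/types`):

```ts
import { BaseChatModel } from "@adaline/openai";
import type { ParamsType } from "@adaline/provider";
import type { MessageType } from "@adaline/types";

// De-minified sketch of the new override. It reuses the inherited OpenAI
// transform, then flattens array-valued system/assistant content into a
// single newline-joined string, presumably because Groq expects plain
// string content for those roles.
class BaseChatModelGroqSketch extends BaseChatModel {
  transformMessages(messages: MessageType[]): ParamsType {
    const params = super.transformMessages(messages) as ParamsType & {
      messages: Array<{ role: string; content: string | Array<{ text: string }> }>;
    };
    for (const message of params.messages) {
      if (message.role === "system" && typeof message.content !== "string") {
        // Multi-part system content -> one newline-separated string.
        message.content = message.content.map((part) => part.text).join("\n");
      } else if (message.role === "assistant" && message.content && typeof message.content !== "string") {
        // Same flattening for assistant messages carrying content parts.
        message.content = message.content.map((part) => part.text).join("\n");
      }
    }
    return params;
  }
}
```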
 
-exports.BaseChatModelGroq = t;
-exports.BaseChatModelOptions = i;
-exports.Groq = p;
+exports.BaseChatModelGroq = s;
+exports.BaseChatModelOptions = n;
+exports.Groq = d;
 exports.LlamaGuard_3_8b = _;
 exports.LlamaGuard_3_8bLiteral = z;
 exports.LlamaGuard_3_8bOptions = re;
-exports.LlamaGuard_3_8bSchema = U;
+exports.LlamaGuard_3_8bSchema = P;
 exports.Llama_3_1_70b = L;
 exports.Llama_3_1_70bLiteral = J;
 exports.Llama_3_1_70bSchema = W;
-exports.Llama_3_1_70b_Options = ue;
+exports.Llama_3_1_70b_Options = Me;
 exports.Llama_3_1_8b = y;
 exports.Llama_3_1_8bLiteral = ee;
 exports.Llama_3_1_8bSchema = oe;
-exports.Llama_3_1_8b_Options = Me;
-exports.Llama_3_2_11b_Vision = M;
+exports.Llama_3_1_8b_Options = ue;
+exports.Llama_3_2_11b_Vision = u;
 exports.Llama_3_2_11b_VisionLiteral = K;
 exports.Llama_3_2_11b_VisionOptions = he;
 exports.Llama_3_2_11b_VisionSchema = Y;
@@ -39,19 +41,19 @@ exports.Llama_3_2_3bSchema = X;
 exports.Llama_3_2_3b_Options = fe;
 exports.Llama_3_70b = f;
 exports.Llama_3_70bLiteral = w;
-exports.Llama_3_70bOptions = pe;
+exports.Llama_3_70bOptions = de;
 exports.Llama_3_70bSchema = V;
 exports.Llama_3_70b_Tool_Use = b;
 exports.Llama_3_70b_Tool_UseLiteral = D;
-exports.Llama_3_70b_Tool_UseSchema = $;
+exports.Llama_3_70b_Tool_UseSchema = j;
 exports.Llama_3_70b_Tool_Use_Options = ce;
 exports.Llama_3_8b = h;
-exports.Llama_3_8bLiteral = P;
-exports.Llama_3_8bOptions = de;
+exports.Llama_3_8bLiteral = U;
+exports.Llama_3_8bOptions = pe;
 exports.Llama_3_8bSchema = F;
-exports.Llama_3_8b_Tool_Use = u;
-exports.Llama_3_8b_Tool_UseLiteral = N;
-exports.Llama_3_8b_Tool_UseSchema = j;
+exports.Llama_3_8b_Tool_Use = M;
+exports.Llama_3_8b_Tool_UseLiteral = $;
+exports.Llama_3_8b_Tool_UseSchema = N;
 exports.Llama_3_8b_Tool_Use_Options = _e;
 exports.Mixtral_8x7b = c;
 exports.Mixtral_8x7bLiteral = k;
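The export list itself is unchanged between 0.2.0 and 0.3.1; every `exports.*` edit above only re-points a public name at a renamed minified local. A quick consumer-side check of that surface (a sketch assuming an installed package and a placeholder key):

```ts
import { Groq } from "@adaline/groq";

const groq = new Groq();

// Model literals registered by the provider, e.g. "llama-guard-3-8b",
// "llama3-8b-8192", "llama3-70b-8192", "llama-3.1-8b-instant", ...
console.log(groq.chatModelLiterals());

// Same factory signature as 0.2.0: (modelLiteral, { apiKey }).
const model = groq.chatModel("llama3-70b-8192", { apiKey: "YOUR_GROQ_API_KEY" });
```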
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/provider/provider.groq.ts","../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/mixtral-8x7b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-70b-tool-use.groq.ts","../src/models/chat-models/llama-3-8b-tool-use.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-1-70b.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Mixtral_8x7bLiteral","Mixtral_8x7bDescription","Mixtral_8x7bSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Mixtral_8x7bOptions","Mixtral_8x7b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_70b_Tool_UseLiteral","Llama_3_70b_Tool_UseDescription","Llama_3_70b_Tool_UseSchema","Llama_3_70b_Tool_Use_Options","Llama_3_70b_Tool_Use","Llama_3_8b_Tool_UseLiteral","Llama_3_8b_Tool_UseDescription","Llama_3_8b_Tool_UseSchema","Llama_3_8b_Tool_Use_Options","Llama_3_8b_Tool_Use","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_1_70bLiteral","Llama_3_1_70bDescription","Llama_3_1_70bSchema","Llama_3_1_70b_Options","Llama_3_1_70b","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","ProviderLiteral","name","ProviderError","model","modelOptions","__spreadValues","acc","key"],"mappings":";;;;;;AAEA,IAAA,EAAA,CAAA,MAAA,CAAA,cAAA,CAAA,IAAA,EAAA,CAAA,MAAA,CAAA,qBAAA,CAAA,IAAA,EAAA,CAAA,MAAA,CAAA,SAAA,CAAA,cAAA,CAAA,EAAA,CAAA,MAAA,CAAA,SAAA,CAAA,oBAAA,CAAA,IAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,CAAA,IAAA,CAAA,CAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,UAAA,CAAA,CAAA,CAAA,CAAA,YAAA,CAAA,CAAA,CAAA,CAAA,QAAA,CAAA,CAAA,CAAA,CAAA,KAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,CAAA,IAAA,IAAA,CAAA,IAAA,CAAA,GAAA,CAAA,CAAA,EAAA,CAAA,CAAA,EAAA,CAAA,IAAA,CAAA,CAAA,CAAA,CAAA,CAAA,EAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,EAAA,CAAA,IAAA,IAAA,CAAA,IAAA,EAAA,CAAA,CAAA,CAAA,CAAA,EAAA,CAAA,IAAA,CAAA,CAAA,C
AAA,CAAA,CAAA,EAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,OAAA,CAAA,CAAA,CCKMA,IAAAA,CAAAA,CAAuBC,KAAE,CAAA,MAAA,CAAO,CACpC,MAAQA,CAAAA,KAAAA,CAAE,MAAO,EAAA,CAAE,IAAI,CAAC,CAC1B,CAAC,CAAA,CAGKC,EAAN,cAAgCC,oBAAc,CAM5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAgBN,CAAAA,CAAAA,CAAqB,MAAMK,CAAO,CAAA,CAClDE,CAAUC,CAAAA,CAAAA,CAAK,QACrB,KAAMJ,CAAAA,CAAAA,CAAa,CACjB,MAAA,CAAQE,EAAc,MACtB,CAAA,OAAA,CAASC,CACT,CAAA,eAAA,CAAiB,GAAGA,CAAO,CAAA,iBAAA,CAAA,CAC3B,aAAe,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAC,CAbH,CAAA,IAAA,CAAS,QAAU,IAcjB,CAAA,IAAA,CAAK,WAAcH,CAAAA,CAAAA,CACnB,KAAK,UAAaE,CAAAA,CAAAA,CAAc,OAClC,CAGF,EC9BA,IAAMG,CAAAA,CAAcC,yBAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,qBAAY,WAAY,CAAA,KAAA,CAC/B,WAAaA,CAAAA,oBAAAA,CAAY,YAAY,WACrC,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKC,CAAaC,CAAAA,CAAAA,EACjBH,wBAAgB,CAAA,CACd,MAAO,YACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,UAAA,CAAW,MAC9B,WAAaA,CAAAA,oBAAAA,CAAY,WAAW,WACpC,CAAA,GAAA,CAAK,EACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,QAAS,CACX,CAAC,CAEGC,CAAAA,CAAAA,CAAOC,+BAAsB,CAC/B,KAAA,CAAO,MACP,CAAA,KAAA,CAAOJ,qBAAY,IAAK,CAAA,CAAC,EAAE,KAC3B,CAAA,WAAA,CAAaA,qBAAY,IAAK,CAAA,CAAC,CAAE,CAAA,WAAA,CACjC,IAAK,CACP,CAAC,CAEGK,CAAAA,CAAAA,CAAON,yBAAgB,CAC3B,KAAA,CAAO,OACP,CAAA,KAAA,CAAOC,qBAAY,KAAM,CAAA,KAAA,CACzB,WAAaA,CAAAA,oBAAAA,CAAY,MAAM,WAC/B,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,EACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKM,CAAAA,CAAAA,CAAmBP,wBAAgB,CAAA,CACvC,MAAO,mBACP,CAAA,KAAA,CAAOC,qBAAY,iBAAkB,CAAA,KAAA,CACrC,YAAaA,oBAAY,CAAA,iBAAA,CAAkB,WAC3C,CAAA,GAAA,CAAK,GACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,QAAS,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,yBAAgB,CACtC,KAAA,CAAO,mBACP,KAAOC,CAAAA,oBAAAA,CAAY,iBAAiB,KACpC,CAAA,WAAA,CAAaA,oBAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKQ,CAAOT,CAAAA,wBAAAA,CAAgB,CAC3B,KAAO,CAAA,MAAA,CACP,MAAOC,oBAAY,CAAA,IAAA,CAAK,KACxB,CAAA,WAAA,CAAaA,qBAAY,IAAK,CAAA,WAAA,CAC9B,GAAK,CAAA,CAAA,CACL,IAAK,GACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAAA,CAEKS,CAAiBC,CAAAA,+BAAAA,CAAuB,CAC5C,KAAO,CAAA,iBAAA,CACP,KAAOV,CAAAA,oBAAAA,CAAY,gBAAgB,KACnC,CAAA,WAAA,CAAaA,oBAAY,CAAA,eAAA,CAAgB,YACzC,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,OAAQ,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,gCAAuB,CACxC,KAAA,CAAO,aACP,CAAA,KAAA,CAAO,cACP,WACE,CAAA,+LAAA,CAGF,OAAS,CAAA,MAAA,CACT,QAAS,CAAC,MAAA,CAAQ,UAAY,CAAA,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,EAA6BV,CAAAA,CAAAA,EACjCZ,MAAE,MAAO,CAAA,CACP,WAAaQ,CAAAA,CAAAA,CAAY,OACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,OACtC,IAAMC,CAAAA,CAAAA,CAAK,MACX,CAAA,IAAA,CAAME,EAAK,MACX,CAAA,gBAAA,CAAkBC,EAAiB,MACnC,CAAA,eAAA,CAAiBC,EAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,UAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,WAAYE,CAAW,CAAA,MACzB,CAAC,CAAA,CAEGG,GAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,IACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,IACtC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,IAAA,CAAME,EAAK,GACX,CAAA,gBAAA,CAAkBC,EAAiB,GACnC,CAAA,eAAA,CAAiBC,EAAgB,GACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,GAAA,CACX,eAAgBC,CAAe,CAAA,GAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,GACzB,CCjCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,KAAOb,CAA6B,GAAA,CAClC,IAAKY,EAAuBZ,CAAAA,CAAe,EAC3C,MAAQU,CAAAA,EAAAA,CAA0BV,CAAe,CACnD,EACF,CCIA,CAAA,IAAMc,CAAsB,CAAA,oBAAA,CAEtBC,GACJ,mGAEIC,CAAAA,CAAAA,CAAqBC,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAML,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOK,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAA
Q,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKS,CAAAA,EAAAA,CAAsBnC,CAGtBoC,CAAAA,CAAAA,CAAN,cAA2BlC,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAkC,CAC5C,KAAMwB,CAAAA,CAAAA,CAAoBxB,CAAO,EACnC,CACF,MCzBMgC,CAAyB,CAAA,kBAAA,CAEzBC,GACJ,iGAEIC,CAAAA,CAAAA,CAAwBT,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC/G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOL,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKc,CAAAA,EAAAA,CAAyBxC,EAGzByC,CAAN,CAAA,cAA8BvC,CAAkB,CAC9C,YAAYG,CAAqC,CAAA,CAC/C,KAAMkC,CAAAA,CAAAA,CAAuBlC,CAAO,EACtC,CACF,ECzBA,IAAMqC,CAAoB,CAAA,gBAAA,CAEpBC,GACJ,yKAGIC,CAAAA,CAAAA,CAAmBd,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC1G,IAAA,CAAMU,EACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,KACjB,KAAOV,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,EAEKmB,EAAoB7C,CAAAA,CAAAA,CAGpB8C,CAAN,CAAA,cAAyB5C,CAAkB,CACzC,WAAA,CAAYG,CAAgC,CAAA,CAC1C,MAAMuC,CAAkBvC,CAAAA,CAAO,EACjC,CACF,MC1BM0C,CAAqB,CAAA,iBAAA,CAErBC,EACJ,CAAA,yKAAA,CAGIC,EAAoBnB,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMe,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOf,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKwB,CAAAA,EAAAA,CAAqBlD,EAGrBmD,CAAN,CAAA,cAA0BjD,CAAkB,CAC1C,YAAYG,CAAiC,CAAA,CAC3C,MAAM4C,CAAmB5C,CAAAA,CAAO,EAClC,CACF,EC1BA,IAAM+C,CAA8B,CAAA,uCAAA,CAE9BC,GACJ,iJAGIC,CAAAA,CAAAA,CAA6BxB,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CACpH,IAAA,CAAMoB,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOpB,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK6B,CAAAA,EAAAA,CAA+BvD,EAG/BwD,CAAN,CAAA,cAAmCtD,CAAkB,CACnD,WAAA,CAAYG,CAA2C,CAAA,CACrD,MAAMiD,CAA4BjD,CAAAA,CAAO,EAC3C,CACF,EC1BA,IAAMoD,EAA6B,sCAE7BC,CAAAA,EAAAA,CACJ,gJAGIC,CAAAA,CAAAA,CAA4B7B,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CACnH,IAAMyB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOzB,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKkC,CAAAA,EAAAA,CAA8B5D,EAG9B6D,CAAN,CAAA,cAAkC3D,CAAkB,CAClD,YAAYG,CAA0C,CAAA,CACpD,KAAMsD,CAAAA,CAAAA,CAA2BtD,CAAO,EAC1C,CACF,EC1BA,IAAMyD,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6BlC,yBAAgBC,2BAAsBkC,CAAAA,oCAA6B,EAAE,KAAM,CAAA,CAC5G,IAAMH,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAO9B,CAAAA,8BAAAA,CACP,UAAYiC,CAAAA,gCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKxC,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAA8BnE,CAG9BoE,CAAAA,CAAAA,CAAN,cAAmClE,CAAkB,CACnD,YAAYG,CAA0C,CAAA,CACpD,MAAM2D,CAA4B3D,CAAAA,CAAO,EAC3C,CACF,EC3BA,IAAMgE,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqBzC,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAMqC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOrC,CAAAA,8BAAAA,CACP,WAAYC,wCACZ,CAAA,MAAA,CA
AQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK8C,CAAAA,EAAAA,CAAuBxE,EAGvByE,CAAN,CAAA,cAA2BvE,CAAkB,CAC3C,YAAYG,CAAmC,CAAA,CAC7C,KAAMkE,CAAAA,CAAAA,CAAoBlE,CAAO,EACnC,CACF,EC1BA,IAAMqE,CAAsB,CAAA,sBAAA,CAEtBC,GACJ,oQAGIC,CAAAA,CAAAA,CAAqB9C,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAM0C,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAO1C,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEKmD,EAAuB7E,CAAAA,CAAAA,CAGvB8E,CAAN,CAAA,cAA2B5E,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,MAAMuE,CAAoBvE,CAAAA,CAAO,EACnC,CACF,MC1BM0E,CAAuB,CAAA,yBAAA,CAEvBC,GACJ,6NAGIC,CAAAA,CAAAA,CAAsBnD,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC7G,IAAM+C,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO/C,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKwD,CAAAA,EAAAA,CAAwBlF,EAGxBmF,CAAN,CAAA,cAA4BjF,CAAkB,CAC5C,WAAA,CAAYG,EAAoC,CAC9C,KAAA,CAAM4E,CAAqB5E,CAAAA,CAAO,EACpC,CACF,EC1BA,IAAM+E,EAAsB,CAAA,sBAAA,CAEtBC,GACJ,6NAGIC,CAAAA,EAAAA,CAAqBxD,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMoD,GACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOpD,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,EAEK6D,EAAuBvF,CAAAA,CAAAA,CAGvBwF,CAAN,CAAA,cAA2BtF,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,MAAMiF,EAAoBjF,CAAAA,CAAO,EACnC,CACF,EfkBMoF,IAAAA,EAAAA,CAAkB,OAClBjF,CAAN,CAAA,KAAyF,CAAzF,WACE,EAAA,CAAA,IAAA,CAAS,OAAU,CAAA,IAAA,CACnB,KAAS,IAAOiF,CAAAA,EAAAA,CAGhB,IAAiB,CAAA,kBAAA,CAKV,CACH,CAACpD,CAAsB,EAAG,CACxB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACG,CAAiB,EAAG,CACnB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACG,CAAkB,EAAG,CACpB,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACmC,EAAmB,EAAG,CACrB,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,EACf,CACA,CAAA,CAAC7B,CAA0B,EAAG,CAC5B,MAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACoB,CAAoB,EAAG,CACtB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAAC7B,CAA2B,EAAG,CAC7B,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACQ,CAA2B,EAAG,CAC7B,KAAA,CAAOM,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaH,CAAAA,CACf,EACA,CAACK,CAAmB,EAAG,CACrB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACG,CAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACjD,CAAmB,EAAG,CACrB,KAAOS,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaN,CACf,CACF,CAEF,CAAA,IAAA,CAAiB,wBAKZ,GAAC,CAEN,iBAA8B,EAAA,CAC5B,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,kBAAkB,CAC5C,CAEA,SAAU6D,CAAAA,CAAAA,CAAcrF,CAAyB,CAAA,CAC/C,GAAI,EAAEqF,CAAAA,IAAQ,IAAK,CAAA,kBAAA,CAAA,CACjB,MAAM,IAAIC,sBAAAA,CAAc,CACtB,IAAA,CAAM,oBAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,MAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAI,CAAA;AAAA,UACrC,EAAA,IAAA,CAAK,iBAAkB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,mBAAmBF,CAAI,CAAA,CAAE,KACtCG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChBzF,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,kBAAA,CAAmBoF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACnF,CAAA,OAAO,IAAID,CAAAA,CAAMtF,CAAa,CAChC,CAEA,eAAA,CAAgBoF,CAAmC,CAAA,
CACjD,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,oBACjB,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,iBAAA,EAAoBA,CAAI,CAAA;AAAA,UAAA,EACrC,IAAK,CAAA,iBAAA,EAAoB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,EAEH,OAAO,IAAA,CAAK,kBAAmBA,CAAAA,CAAI,EAAE,WACvC,CAEA,gBAAwD,EAAA,CACtD,OAAO,MAAA,CAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAE,CAAA,MAAA,CAAO,CAACK,CAAAA,CAAKC,KACvDD,CAAIC,CAAAA,CAAG,CAAI,CAAA,IAAA,CAAK,mBAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CACN,EAAyC,CAC9C,CAEA,wBAAmC,CACjC,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CACjD,CAEA,cAAA,CAAeL,EAAcrF,CAA8B,CAAA,CACzD,GAAI,EAAEqF,CAAQ,IAAA,IAAA,CAAK,uBACjB,CAAA,CAAA,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,yBAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,sBAAuB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,wBAAwBF,CAAI,CAAA,CAAE,KAC3CG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChBzF,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,uBAAA,CAAwBoF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACxF,CAAA,OAAO,IAAID,CAAAA,CAAMtF,CAAa,CAChC,CAEA,oBAAA,CAAqBoF,CAAwC,CAAA,CAC3D,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,yBACjB,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CAAA,UAAA,CAAA,CACnC,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,sBAAA,EAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,wBAAyB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAAA,CAEH,OAAO,IAAK,CAAA,uBAAA,CAAwBA,CAAI,CAAE,CAAA,WAC5C,CAEA,qBAAkE,EAAA,CAChE,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CAAA,CAAE,OAAO,CAACK,CAAAA,CAAKC,KAC5DD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GACN,EAA8C,CACnD,CACF,EAxJMvF,EAGY,OAAU,CAAA,gCAAA","file":"index.js","sourcesContent":["import { z } from \"zod\";\n\nimport { \n ChatModelV1, \n ChatModelSchemaType, \n EmbeddingModelV1, \n EmbeddingModelSchemaType, \n ProviderV1, \n ProviderError \n} from \"@adaline/provider\";\n\nimport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n} from \"./../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: \n Record<string, { \n model: { new (options: any): ChatModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: ChatModelSchemaType\n }> = {\n [LlamaGuard_3_8bLiteral]: {\n model: LlamaGuard_3_8b,\n modelOptions: LlamaGuard_3_8bOptions,\n 
modelSchema: LlamaGuard_3_8bSchema,\n },\n [Llama_3_8bLiteral]: {\n model: Llama_3_8b,\n modelOptions: Llama_3_8bOptions,\n modelSchema: Llama_3_8bSchema,\n },\n [Llama_3_70bLiteral]: {\n model: Llama_3_70b,\n modelOptions: Llama_3_70bOptions,\n modelSchema: Llama_3_70bSchema,\n },\n [Llama_3_1_8bLiteral]: {\n model: Llama_3_1_8b,\n modelOptions: Llama_3_1_8b_Options,\n modelSchema: Llama_3_1_8bSchema,\n },\n [Llama_3_8b_Tool_UseLiteral]: {\n model: Llama_3_8b_Tool_Use,\n modelOptions: Llama_3_8b_Tool_Use_Options,\n modelSchema: Llama_3_8b_Tool_UseSchema,\n },\n [Llama_3_1_70bLiteral]: {\n model: Llama_3_1_70b,\n modelOptions: Llama_3_1_70b_Options,\n modelSchema: Llama_3_1_70bSchema,\n },\n [Llama_3_70b_Tool_UseLiteral]: {\n model: Llama_3_70b_Tool_Use,\n modelOptions: Llama_3_70b_Tool_Use_Options,\n modelSchema: Llama_3_70b_Tool_UseSchema,\n },\n [Llama_3_2_11b_VisionLiteral]: {\n model: Llama_3_2_11b_Vision,\n modelOptions: Llama_3_2_11b_VisionOptions,\n modelSchema: Llama_3_2_11b_VisionSchema,\n },\n [Llama_3_2_3bLiteral]: {\n model: Llama_3_2_3b,\n modelOptions: Llama_3_2_3b_Options,\n modelSchema: Llama_3_2_3bSchema,\n },\n [Llama_3_2_1bLiteral]: {\n model: Llama_3_2_1b,\n modelOptions: Llama_3_2_1b_Options,\n modelSchema: Llama_3_2_1bSchema,\n },\n [Mixtral_8x7bLiteral]: {\n model: Mixtral_8x7b,\n modelOptions: Mixtral_8x7bOptions,\n modelSchema: Mixtral_8x7bSchema,\n },\n };\n\n private readonly embeddingModelFactories: \n Record<string, { \n model: { new (options: any): EmbeddingModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: EmbeddingModelSchemaType\n }> = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModel(name: string, options: O): ChatModelV1 {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.chatModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n chatModelSchema(name: string): ChatModelSchemaType {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n return this.chatModelFactories[name].modelSchema;\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce((acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, ChatModelSchemaType>);\n }\n\n embeddingModelLiterals(): string[] {\n return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModel(name: string, options: O): EmbeddingModelV1 {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.embeddingModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.embeddingModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n 
embeddingModelSchema(name: string): EmbeddingModelSchemaType {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n return this.embeddingModelFactories[name].modelSchema;\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce((acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, EmbeddingModelSchemaType>);\n }\n}\n\nexport { Groq };","import { z } from \"zod\";\n\nimport { BaseChatModel } from \"@adaline/openai\";\nimport { ChatModelSchemaType } from \"@adaline/provider\";\n\nimport { Groq } from \"../../provider\";\n\nconst BaseChatModelOptions = z.object({\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n // TODO: streamChat needs it's own implementation, OpenAI stream transformer is skipping some chunks\n}\n\nexport { \n BaseChatModelGroq, \n BaseChatModelOptions, \n type BaseChatModelOptionsType,\n};\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n });\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: 
CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { \n frequencyPenalty, \n maxTokens, \n presencePenalty, \n seed, \n stop, \n temperature, \n toolChoice, \n topP, \n responseFormat \n};\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n responseFormat,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import {\n ChatModelBaseConfigDef,\n ChatModelBaseConfigSchema,\n} from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Mixtral_8x7bLiteral = \"mixtral-8x7b-32768\" as const;\n// https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1\nconst Mixtral_8x7bDescription = \n \"The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.\";\n\nconst Mixtral_8x7bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Mixtral_8x7bLiteral,\n description: Mixtral_8x7bDescription,\n maxInputTokens: 32768,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Mixtral_8x7bOptions = BaseChatModelOptions;\ntype Mixtral_8x7bOptionsType = z.infer<typeof Mixtral_8x7bOptions>;\n\nclass Mixtral_8x7b extends BaseChatModelGroq {\n constructor(options: Mixtral_8x7bOptionsType) {\n super(Mixtral_8x7bSchema, options);\n }\n}\n\nexport {\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n 
Mixtral_8x7bLiteral,\n type Mixtral_8x7bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \n \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n type LlamaGuard_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n type Llama_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst 
Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n type Llama_3_70bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70b_Tool_UseLiteral = \"llama3-groq-70b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-70B-Tool-Use\nconst Llama_3_70b_Tool_UseDescription = \n \"This is the 70B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_70b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70b_Tool_UseLiteral,\n description: Llama_3_70b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_70b_Tool_Use_OptionsType = z.infer<typeof Llama_3_70b_Tool_Use_Options>;\n\nclass Llama_3_70b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_70b_Tool_Use_OptionsType) {\n super(Llama_3_70b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n type Llama_3_70b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8b_Tool_UseLiteral = \"llama3-groq-8b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-8B-Tool-Use\nconst Llama_3_8b_Tool_UseDescription = \n \"This is the 8B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and 
function calling tasks.\";\n\nconst Llama_3_8b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8b_Tool_UseLiteral,\n description: Llama_3_8b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_8b_Tool_Use_OptionsType = z.infer<typeof Llama_3_8b_Tool_Use_Options>;\n\nclass Llama_3_8b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_8b_Tool_Use_OptionsType) {\n super(Llama_3_8b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n type Llama_3_8b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelModalities,\n OpenAIChatModelModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription = \n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. \\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n type Llama_3_2_3b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n type Llama_3_2_1b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_70bLiteral = \"llama-3.1-70b-versatile\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_70bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: 
Llama_3_1_70bLiteral,\n description: Llama_3_1_70bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_70b_Options = BaseChatModelOptions;\ntype Llama_3_1_70b_OptionsType = z.infer<typeof Llama_3_1_70b_Options>;\n\nclass Llama_3_1_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_70b_OptionsType) {\n super(Llama_3_1_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n type Llama_3_1_70b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n type Llama_3_1_8b_OptionsType,\n}"]}
|
|
1
|
+
{"version":3,"sources":["../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/mixtral-8x7b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-70b-tool-use.groq.ts","../src/models/chat-models/llama-3-8b-tool-use.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-1-70b.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","transformedMessages","message","content","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Mixtral_8x7bLiteral","Mixtral_8x7bDescription","Mixtral_8x7bSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Mixtral_8x7bOptions","Mixtral_8x7b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_70b_Tool_UseLiteral","Llama_3_70b_Tool_UseDescription","Llama_3_70b_Tool_UseSchema","Llama_3_70b_Tool_Use_Options","Llama_3_70b_Tool_Use","Llama_3_8b_Tool_UseLiteral","Llama_3_8b_Tool_UseDescription","Llama_3_8b_Tool_UseSchema","Llama_3_8b_Tool_Use_Options","Llama_3_8b_Tool_Use","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_1_70bLiteral","Llama_3_1_70bDescription","Llama_3_1_70bSchema","Llama_3_1_70b_Options","Llama_3_1_70b","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","ProviderLiteral","name","ProviderError","model","modelOptions","__spreadValues","acc","key"],"mappings":";;;;;;yWAQMA,CAAuBC,CAAAA,KAAAA,CAAE,OAAO,CACpC,MAAA,CAAQA,MAAE,MAAO,EAAA,CAAE,GAAI,CAAA,CAAC,CAC1B,CAAC,EAGKC,CAAN,CAAA,cAAgCC,oBAAc,CAM5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,CAAqB,CAAA,KAAA,CAAMK,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,CAAK,CAAA,OAAA,CACrB,KAAMJ,CAAAA,CAAAA,CAAa,CACjB,MAAQE,CAAAA,CAAAA,CAAc,MACtB,CAAA,OAAA,CAASC,CACT,CAAA,eAAA,CAAiB,GAAGA,CAAO,CAAA,iBAAA,CAAA,CAC3B,cAAe,CAAGA,EAAAA,CAAO,mBAC3B,CAAC,CAAA,CAbH,IAAS,CAAA,OAAA,CAAU,IAcjB,CAAA,IAAA,CAAK,YAAcH,CACnB,CAAA,IAAA,CAAK,WAAaE,CAAc,CAAA,OAClC,CAEA,iBAAkBG,CAAAA,CAAAA,CAAqC,CACrD,IAAMC,CAAsB,CAAA,KAAA,CAAM,kBAAkBD,CAAQ,CAAA,CAI5D,OAAAC,CAAoB,CAAA,QAAA,CAAS,QA
ASC,CAAY,EAAA,CAC5CA,CAAQ,CAAA,IAAA,GAAS,QACf,CAAA,OAAOA,EAAQ,OAAY,EAAA,QAAA,GAC7BA,CAAQ,CAAA,OAAA,CAAUA,CAAQ,CAAA,OAAA,CAAQ,IAAKC,CAAYA,EAAAA,CAAAA,CAAQ,IAAI,CAAA,CAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnED,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKC,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEMF,CAAAA,CACT,CAIF,ECpDA,IAAMG,CAAcC,CAAAA,wBAAAA,CAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,WAAA,CAAY,MAC/B,WAAaA,CAAAA,oBAAAA,CAAY,WAAY,CAAA,WAAA,CACrC,IAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKC,EAAaC,CACjBH,EAAAA,wBAAAA,CAAgB,CACd,KAAA,CAAO,aACP,KAAOC,CAAAA,oBAAAA,CAAY,UAAW,CAAA,KAAA,CAC9B,YAAaA,oBAAY,CAAA,UAAA,CAAW,WACpC,CAAA,GAAA,CAAK,EACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEGC,EAAOC,8BAAsB,CAAA,CAC/B,KAAO,CAAA,MAAA,CACP,MAAOJ,oBAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,MAC3B,WAAaA,CAAAA,oBAAAA,CAAY,IAAK,CAAA,CAAC,EAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,EAEGK,CAAON,CAAAA,wBAAAA,CAAgB,CAC3B,KAAA,CAAO,QACP,KAAOC,CAAAA,oBAAAA,CAAY,KAAM,CAAA,KAAA,CACzB,YAAaA,oBAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,QAAS,CACX,CAAC,CAEKM,CAAAA,CAAAA,CAAmBP,yBAAgB,CACvC,KAAA,CAAO,mBACP,CAAA,KAAA,CAAOC,qBAAY,iBAAkB,CAAA,KAAA,CACrC,WAAaA,CAAAA,oBAAAA,CAAY,kBAAkB,WAC3C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,EACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,wBAAgB,CAAA,CACtC,MAAO,kBACP,CAAA,KAAA,CAAOC,oBAAY,CAAA,gBAAA,CAAiB,KACpC,CAAA,WAAA,CAAaA,oBAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKQ,CAAOT,CAAAA,wBAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,KAAOC,CAAAA,oBAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,YAAaA,oBAAY,CAAA,IAAA,CAAK,WAC9B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,GAAA,CACL,IAAM,CAAA,CAAA,CACN,QAAS,CACX,CAAC,CAEKS,CAAAA,CAAAA,CAAiBC,gCAAuB,CAC5C,KAAA,CAAO,iBACP,CAAA,KAAA,CAAOV,qBAAY,eAAgB,CAAA,KAAA,CACnC,WAAaA,CAAAA,oBAAAA,CAAY,gBAAgB,WACzC,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAQ,CAAA,aAAa,CACjC,CAAC,EAEKW,CAAaD,CAAAA,+BAAAA,CAAuB,CACxC,KAAA,CAAO,cACP,KAAO,CAAA,aAAA,CACP,WACE,CAAA,+LAAA,CAGF,QAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,WAAY,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,EAAAA,CAA6BV,CACjChB,EAAAA,KAAAA,CAAE,OAAO,CACP,WAAA,CAAaY,CAAY,CAAA,MAAA,CACzB,UAAWG,CAAUC,CAAAA,CAAe,CAAE,CAAA,MAAA,CACtC,KAAMC,CAAK,CAAA,MAAA,CACX,IAAME,CAAAA,CAAAA,CAAK,OACX,gBAAkBC,CAAAA,CAAAA,CAAiB,MACnC,CAAA,eAAA,CAAiBC,EAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,UAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,WAAYE,CAAW,CAAA,MACzB,CAAC,CAAA,CAEGG,GAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,IACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,IACtC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,IAAA,CAAME,EAAK,GACX,CAAA,gBAAA,CAAkBC,CAAiB,CAAA,GAAA,CACnC,gBAAiBC,CAAgB,CAAA,GAAA,CACjC,IAAMC,CAAAA,CAAAA,CAAK,IACX,cAAgBC,CAAAA,CAAAA,CAAe,GAC/B,CAAA,UAAA,CAAYE,EAAW,GACzB,CAAA,CAAA,CCjCF,IAAMI,CAAAA,CAAuB,CAC3B,IAAA,CAAOb,CAA6B,GAAA,CAClC,IAAKY,EAAuBZ,CAAAA,CAAe,CAC3C,CAAA,MAAA,CAAQU,GAA0BV,CAAe,CACnD,CACF,CAAA,CAAA,KCIMc,CAAsB,CAAA,oBAAA,CAEtBC,EACJ,CAAA,mGAAA,CAEIC,EAAqBC,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,KAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOK,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKS,CAAAA,EAAAA,CAAsBvC,CAGtBwC,CAAAA,CAAAA,CAAN,cAA2
BtC,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAkC,CAC5C,KAAM4B,CAAAA,CAAAA,CAAoB5B,CAAO,EACnC,CACF,ECzBA,IAAMoC,CAAyB,CAAA,kBAAA,CAEzBC,GACJ,iGAEIC,CAAAA,CAAAA,CAAwBT,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC/G,IAAA,CAAMK,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOL,8BACP,CAAA,UAAA,CAAYC,yCACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKc,EAAyB5C,CAAAA,CAAAA,CAGzB6C,EAAN,cAA8B3C,CAAkB,CAC9C,WAAA,CAAYG,EAAqC,CAC/C,KAAA,CAAMsC,CAAuBtC,CAAAA,CAAO,EACtC,CACF,ECzBA,IAAMyC,CAAoB,CAAA,gBAAA,CAEpBC,EACJ,CAAA,yKAAA,CAGIC,CAAmBd,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMU,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,eAAiB,CAAA,IAAA,CACjB,KAAOV,CAAAA,8BAAAA,CACP,WAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKmB,CAAAA,EAAAA,CAAoBjD,EAGpBkD,CAAN,CAAA,cAAyBhD,CAAkB,CACzC,YAAYG,CAAgC,CAAA,CAC1C,KAAM2C,CAAAA,CAAAA,CAAkB3C,CAAO,EACjC,CACF,EC1BM8C,IAAAA,CAAAA,CAAqB,kBAErBC,EACJ,CAAA,yKAAA,CAGIC,CAAoBnB,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC3G,IAAMe,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOf,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEKwB,GAAqBtD,CAGrBuD,CAAAA,CAAAA,CAAN,cAA0BrD,CAAkB,CAC1C,WAAYG,CAAAA,CAAAA,CAAiC,CAC3C,KAAA,CAAMgD,EAAmBhD,CAAO,EAClC,CACF,EC1BA,IAAMmD,EAA8B,uCAE9BC,CAAAA,EAAAA,CACJ,iJAGIC,CAAAA,CAAAA,CAA6BxB,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CACpH,IAAMoB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOpB,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEK6B,EAA+B3D,CAAAA,CAAAA,CAG/B4D,CAAN,CAAA,cAAmC1D,CAAkB,CACnD,WAAA,CAAYG,CAA2C,CAAA,CACrD,MAAMqD,CAA4BrD,CAAAA,CAAO,EAC3C,CACF,EC1BMwD,IAAAA,CAAAA,CAA6B,sCAE7BC,CAAAA,EAAAA,CACJ,iJAGIC,CAA4B7B,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,EAAE,KAAM,CAAA,CACnH,IAAMyB,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,KACjB,KAAOzB,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKkC,EAA8BhE,CAAAA,CAAAA,CAG9BiE,EAAN,cAAkC/D,CAAkB,CAClD,WAAA,CAAYG,EAA0C,CACpD,KAAA,CAAM0D,CAA2B1D,CAAAA,CAAO,EAC1C,CACF,EC1BA,IAAM6D,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6BlC,wBAAgBC,CAAAA,2BAAAA,CAAsBkC,oCAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMH,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAO9B,8BACP,CAAA,UAAA,CAAYiC,iCACZ,MAAQ,CAAA,CACN,GAAKxC,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAA8BvE,EAG9BwE,CAAN,CAAA,cAAmCtE,CAAkB,CACnD,YAAYG,CAA0C,CAAA,CACpD,KAAM+D,CAAAA,CAAAA,CAA4B/D,CAAO,EAC3C,CACF,EC3BMoE,IAAAA,CAAAA,CAAsB,uBAEtBC,EACJ,CAAA,oQAAA,CAGIC,CAAqBzC,CAAAA,wBAAAA,CAAgBC,4BAAsBC,4CAAqC,CAAA,CAAE,KAAM,CAAA,CAC5G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,eAAiB,CAAA,IAAA,CACjB,KAAOrC,CAAAA,8BAAAA,CACP,WAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK8C,CAAAA,EAAAA,CAAuB5E,EAGvB6E,CAAN,CAA
A,cAA2B3E,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAMsE,EAAoBtE,CAAO,EACnC,CACF,EC1BA,IAAMyE,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqB9C,yBAAgBC,2BAAsBC,CAAAA,4CAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAM0C,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,+BACP,UAAYC,CAAAA,wCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEKmD,GAAuBjF,CAGvBkF,CAAAA,CAAAA,CAAN,cAA2BhF,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM2E,EAAoB3E,CAAO,EACnC,CACF,MC1BM8E,CAAuB,CAAA,yBAAA,CAEvBC,EACJ,CAAA,6NAAA,CAGIC,EAAsBnD,wBAAgBC,CAAAA,2BAAAA,CAAsBC,4CAAqC,CAAA,CAAE,MAAM,CAC7G,IAAA,CAAM+C,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,KAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAO/C,8BACP,CAAA,UAAA,CAAYC,wCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEKwD,EAAwBtF,CAAAA,CAAAA,CAGxBuF,CAAN,CAAA,cAA4BrF,CAAkB,CAC5C,WAAA,CAAYG,CAAoC,CAAA,CAC9C,MAAMgF,CAAqBhF,CAAAA,CAAO,EACpC,CACF,EC1BMmF,IAAAA,EAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,8NAGIC,EAAqBxD,CAAAA,wBAAAA,CAAgBC,2BAAsBC,CAAAA,4CAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMoD,CAAAA,EAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOpD,CAAAA,8BAAAA,CACP,UAAYC,CAAAA,wCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK6D,CAAAA,EAAAA,CAAuB3F,CAGvB4F,CAAAA,CAAAA,CAAN,cAA2B1F,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAMqF,CAAAA,EAAAA,CAAoBrF,CAAO,EACnC,CACF,ECkBA,IAAMwF,EAAkB,CAAA,MAAA,CAClBrF,EAAN,KAAyF,CAAzF,WACE,EAAA,CAAA,IAAA,CAAS,QAAU,IACnB,CAAA,IAAA,CAAS,IAAOqF,CAAAA,EAAAA,CAGhB,KAAiB,kBAKV,CAAA,CACH,CAACpD,CAAsB,EAAG,CACxB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACG,CAAiB,EAAG,CACnB,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaD,CAAAA,CACf,EACA,CAACG,CAAkB,EAAG,CACpB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACmC,EAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,EACf,CAAA,CACA,CAAC7B,CAA0B,EAAG,CAC5B,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACoB,CAAoB,EAAG,CACtB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAAC7B,CAA2B,EAAG,CAC7B,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaD,CAAAA,CACf,EACA,CAACQ,CAA2B,EAAG,CAC7B,KAAOM,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaH,CACf,CAAA,CACA,CAACK,CAAmB,EAAG,CACrB,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACG,CAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACjD,CAAmB,EAAG,CACrB,KAAA,CAAOS,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaN,CAAAA,CACf,CACF,CAEF,CAAA,IAAA,CAAiB,uBAKZ,CAAA,IAEL,iBAA8B,EAAA,CAC5B,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,kBAAkB,CAC5C,CAEA,UAAU6D,CAAczF,CAAAA,CAAAA,CAAyB,CAC/C,GAAI,EAAEyF,CAAQ,IAAA,IAAA,CAAK,kBACjB,CAAA,CAAA,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAC9B,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAI,CAAA;AAAA,UACrC,EAAA,IAAA,CAAK,iBAAkB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,mBAAmBF,CAAI,CAAA,CAAE,KACtCG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChB7F,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,kBAAA,CAAmBwF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACnF,CAAA,OAAO,IAAID,CAAAA,CAAM1F,CAAa,CAChC,CAEA,eAAA,CAAgBwF,CAAmC,CAAA,CACjD,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,oBACjB,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,KAAO,CAAA,I
AAI,KAAM,CAAA,CAAA,iBAAA,EAAoBA,CAAI,CAAA;AAAA,UAAA,EACrC,IAAK,CAAA,iBAAA,EAAoB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,EAEH,OAAO,IAAA,CAAK,kBAAmBA,CAAAA,CAAI,EAAE,WACvC,CAEA,gBAAwD,EAAA,CACtD,OAAO,MAAA,CAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAE,CAAA,MAAA,CAAO,CAACK,CAAAA,CAAKC,KACvDD,CAAIC,CAAAA,CAAG,CAAI,CAAA,IAAA,CAAK,mBAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CACN,EAAyC,CAC9C,CAEA,wBAAmC,CACjC,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CACjD,CAEA,cAAA,CAAeL,EAAczF,CAA8B,CAAA,CACzD,GAAI,EAAEyF,CAAQ,IAAA,IAAA,CAAK,uBACjB,CAAA,CAAA,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,yBAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,sBAAuB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,wBAAwBF,CAAI,CAAA,CAAE,KAC3CG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChB7F,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,uBAAA,CAAwBwF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACxF,CAAA,OAAO,IAAID,CAAAA,CAAM1F,CAAa,CAChC,CAEA,oBAAA,CAAqBwF,CAAwC,CAAA,CAC3D,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,yBACjB,MAAM,IAAIC,sBAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CAAA,UAAA,CAAA,CACnC,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,sBAAA,EAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,wBAAyB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAAA,CAEH,OAAO,IAAK,CAAA,uBAAA,CAAwBA,CAAI,CAAE,CAAA,WAC5C,CAEA,qBAAkE,EAAA,CAChE,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CAAA,CAAE,OAAO,CAACK,CAAAA,CAAKC,KAC5DD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GACN,EAA8C,CACnD,CACF,EAxJM3F,EAGY,OAAU,CAAA,gCAAA","file":"index.js","sourcesContent":["import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, ParamsType } from \"@adaline/provider\";\n\nimport { Groq } from \"../../provider\";\nimport { MessageType } from \"@adaline/types\";\n\nconst BaseChatModelOptions = z.object({\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality \n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n\n\n // TODO: streamChat needs it's own implementation, OpenAI stream transformer is skipping some chunks\n}\n\nexport { \n 
BaseChatModelGroq, \n BaseChatModelOptions, \n type BaseChatModelOptionsType,\n};\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n });\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { \n frequencyPenalty, \n maxTokens, \n presencePenalty, \n seed, \n stop, \n temperature, \n toolChoice, \n topP, \n responseFormat \n};\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n responseFormat,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? 
undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import {\n ChatModelBaseConfigDef,\n ChatModelBaseConfigSchema,\n} from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Mixtral_8x7bLiteral = \"mixtral-8x7b-32768\" as const;\n// https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1\nconst Mixtral_8x7bDescription = \n \"The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.\";\n\nconst Mixtral_8x7bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Mixtral_8x7bLiteral,\n description: Mixtral_8x7bDescription,\n maxInputTokens: 32768,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Mixtral_8x7bOptions = BaseChatModelOptions;\ntype Mixtral_8x7bOptionsType = z.infer<typeof Mixtral_8x7bOptions>;\n\nclass Mixtral_8x7b extends BaseChatModelGroq {\n constructor(options: Mixtral_8x7bOptionsType) {\n super(Mixtral_8x7bSchema, options);\n }\n}\n\nexport {\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n type Mixtral_8x7bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \n \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = 
z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n type LlamaGuard_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n type Llama_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n type Llama_3_70bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n 
OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70b_Tool_UseLiteral = \"llama3-groq-70b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-70B-Tool-Use\nconst Llama_3_70b_Tool_UseDescription = \n \"This is the 70B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_70b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70b_Tool_UseLiteral,\n description: Llama_3_70b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_70b_Tool_Use_OptionsType = z.infer<typeof Llama_3_70b_Tool_Use_Options>;\n\nclass Llama_3_70b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_70b_Tool_Use_OptionsType) {\n super(Llama_3_70b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n type Llama_3_70b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8b_Tool_UseLiteral = \"llama3-groq-8b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-8B-Tool-Use\nconst Llama_3_8b_Tool_UseDescription = \n \"This is the 8B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_8b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8b_Tool_UseLiteral,\n description: Llama_3_8b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_8b_Tool_Use_OptionsType = z.infer<typeof Llama_3_8b_Tool_Use_Options>;\n\nclass Llama_3_8b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_8b_Tool_Use_OptionsType) {\n super(Llama_3_8b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n type Llama_3_8b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelModalities,\n OpenAIChatModelModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from 
\"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription = \n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. \\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n type Llama_3_2_3b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n type Llama_3_2_1b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_70bLiteral = \"llama-3.1-70b-versatile\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_70bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: 
Llama_3_1_70bLiteral,\n description: Llama_3_1_70bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_70b_Options = BaseChatModelOptions;\ntype Llama_3_1_70b_OptionsType = z.infer<typeof Llama_3_1_70b_Options>;\n\nclass Llama_3_1_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_70b_OptionsType) {\n super(Llama_3_1_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n type Llama_3_1_70b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n type Llama_3_1_8b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { \n ChatModelV1, \n ChatModelSchemaType, \n EmbeddingModelV1, \n EmbeddingModelSchemaType, \n ProviderV1, \n ProviderError \n} from \"@adaline/provider\";\n\nimport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n Mixtral_8x7b,\n 
Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n} from \"./../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: \n Record<string, { \n model: { new (options: any): ChatModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: ChatModelSchemaType\n }> = {\n [LlamaGuard_3_8bLiteral]: {\n model: LlamaGuard_3_8b,\n modelOptions: LlamaGuard_3_8bOptions,\n modelSchema: LlamaGuard_3_8bSchema,\n },\n [Llama_3_8bLiteral]: {\n model: Llama_3_8b,\n modelOptions: Llama_3_8bOptions,\n modelSchema: Llama_3_8bSchema,\n },\n [Llama_3_70bLiteral]: {\n model: Llama_3_70b,\n modelOptions: Llama_3_70bOptions,\n modelSchema: Llama_3_70bSchema,\n },\n [Llama_3_1_8bLiteral]: {\n model: Llama_3_1_8b,\n modelOptions: Llama_3_1_8b_Options,\n modelSchema: Llama_3_1_8bSchema,\n },\n [Llama_3_8b_Tool_UseLiteral]: {\n model: Llama_3_8b_Tool_Use,\n modelOptions: Llama_3_8b_Tool_Use_Options,\n modelSchema: Llama_3_8b_Tool_UseSchema,\n },\n [Llama_3_1_70bLiteral]: {\n model: Llama_3_1_70b,\n modelOptions: Llama_3_1_70b_Options,\n modelSchema: Llama_3_1_70bSchema,\n },\n [Llama_3_70b_Tool_UseLiteral]: {\n model: Llama_3_70b_Tool_Use,\n modelOptions: Llama_3_70b_Tool_Use_Options,\n modelSchema: Llama_3_70b_Tool_UseSchema,\n },\n [Llama_3_2_11b_VisionLiteral]: {\n model: Llama_3_2_11b_Vision,\n modelOptions: Llama_3_2_11b_VisionOptions,\n modelSchema: Llama_3_2_11b_VisionSchema,\n },\n [Llama_3_2_3bLiteral]: {\n model: Llama_3_2_3b,\n modelOptions: Llama_3_2_3b_Options,\n modelSchema: Llama_3_2_3bSchema,\n },\n [Llama_3_2_1bLiteral]: {\n model: Llama_3_2_1b,\n modelOptions: Llama_3_2_1b_Options,\n modelSchema: Llama_3_2_1bSchema,\n },\n [Mixtral_8x7bLiteral]: {\n model: Mixtral_8x7b,\n modelOptions: Mixtral_8x7bOptions,\n modelSchema: Mixtral_8x7bSchema,\n },\n };\n\n private readonly embeddingModelFactories: \n Record<string, { \n model: { new (options: any): EmbeddingModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: EmbeddingModelSchemaType\n }> = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModel(name: string, options: O): ChatModelV1 {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.chatModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n chatModelSchema(name: string): ChatModelSchemaType {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n return this.chatModelFactories[name].modelSchema;\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce((acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, ChatModelSchemaType>);\n }\n\n embeddingModelLiterals(): string[] {\n return 
Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModel(name: string, options: O): EmbeddingModelV1 {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.embeddingModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.embeddingModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n embeddingModelSchema(name: string): EmbeddingModelSchemaType {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n return this.embeddingModelFactories[name].modelSchema;\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce((acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, EmbeddingModelSchemaType>);\n }\n}\n\nexport { Groq };"]}
|
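The provider source embedded in the source map above shows the package's public surface: a Groq class whose chatModel(name, options) factory validates the options against a zod schema requiring a non-empty apiKey and throws a ProviderError for unregistered model names. A minimal usage sketch under those assumptions (the API key value is a placeholder, not a real credential):

import { Groq } from "@adaline/groq";

// Sketch based on the provider source embedded in the source map above, not on package docs.
const groq = new Groq();

// Registered chat model literals, e.g. "llama-3.1-8b-instant" or "mixtral-8x7b-32768".
console.log(groq.chatModelLiterals());

// chatModel() parses the options with the BaseChatModelOptions zod schema (non-empty apiKey)
// and throws a ProviderError if the model name is not in chatModelFactories.
const chatModel = groq.chatModel("llama-3.1-8b-instant", { apiKey: "<GROQ_API_KEY>" });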
package/dist/index.mjs
CHANGED
|
@@ -2,12 +2,14 @@ import { RangeConfigItem, CHAT_CONFIG, MultiStringConfigItem, SelectStringConfig
|
|
|
2
2
|
import { z as z$1 } from 'zod';
|
|
3
3
|
import { OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum, OpenAIChatModelRolesMap, OpenAIChatModelTextToolModalities, OpenAIChatModelModalitiesEnum, OpenAIChatModelModalities, BaseChatModel } from '@adaline/openai';
|
|
4
4
|
|
|
5
|
-
var Oe=Object.defineProperty;var
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
${this.
|
|
9
|
-
${this.
|
|
5
|
+
var Oe=Object.defineProperty;var te=Object.getOwnPropertySymbols;var Te=Object.prototype.hasOwnProperty,Le=Object.prototype.propertyIsEnumerable;var ae=(t,e,a)=>e in t?Oe(t,e,{enumerable:!0,configurable:!0,writable:!0,value:a}):t[e]=a,g=(t,e)=>{for(var a in e||(e={}))Te.call(e,a)&&ae(t,a,e[a]);if(te)for(var a of te(e))Le.call(e,a)&&ae(t,a,e[a]);return t};var n=z$1.object({apiKey:z$1.string().min(1)}),s=class extends BaseChatModel{constructor(a,r){let i=n.parse(r),m=d.baseUrl;super(a,{apiKey:i.apiKey,baseUrl:m,completeChatUrl:`${m}/chat/completions`,streamChatUrl:`${m}/chat/completions`});this.version="v1";this.modelSchema=a,this.groqApiKey=i.apiKey;}transformMessages(a){let r=super.transformMessages(a);return r.messages.forEach(i=>{i.role==="system"?typeof i.content!="string"&&(i.content=i.content.map(m=>m.text).join(`
|
|
6
|
+
`)):i.role==="assistant"&&i.content&&typeof i.content!="string"&&(i.content=i.content.map(m=>m.text).join(`
|
|
7
|
+
`));}),r}};var x=RangeConfigItem({param:"temperature",title:CHAT_CONFIG.TEMPERATURE.title,description:CHAT_CONFIG.TEMPERATURE.description,min:0,max:2,step:.01,default:1}),S=t=>RangeConfigItem({param:"max_tokens",title:CHAT_CONFIG.MAX_TOKENS.title,description:CHAT_CONFIG.MAX_TOKENS.description,min:0,max:t,step:1,default:0}),I=MultiStringConfigItem({param:"stop",title:CHAT_CONFIG.STOP(4).title,description:CHAT_CONFIG.STOP(4).description,max:4}),A=RangeConfigItem({param:"top_p",title:CHAT_CONFIG.TOP_P.title,description:CHAT_CONFIG.TOP_P.description,min:0,max:1,step:.01,default:1}),E=RangeConfigItem({param:"frequency_penalty",title:CHAT_CONFIG.FREQUENCY_PENALTY.title,description:CHAT_CONFIG.FREQUENCY_PENALTY.description,min:-2,max:2,step:.01,default:0}),q=RangeConfigItem({param:"presence_penalty",title:CHAT_CONFIG.PRESENCE_PENALTY.title,description:CHAT_CONFIG.PRESENCE_PENALTY.description,min:-2,max:2,step:.01,default:0}),R=RangeConfigItem({param:"seed",title:CHAT_CONFIG.SEED.title,description:CHAT_CONFIG.SEED.description,min:0,max:1e6,step:1,default:0}),G=SelectStringConfigItem({param:"response_format",title:CHAT_CONFIG.RESPONSE_FORMAT.title,description:CHAT_CONFIG.RESPONSE_FORMAT.description,default:"text",choices:["text","json_object"]}),B=SelectStringConfigItem({param:"tool_choice",title:"Tool choice",description:"Controls which (if any) tool is called by the model. 'none' means the model will not call a function. 'auto' means the model can pick between generating a message or calling a tool.",default:"auto",choices:["auto","required","none"]});var ie=t=>z$1.object({temperature:x.schema,maxTokens:S(t).schema,stop:I.schema,topP:A.schema,frequencyPenalty:E.schema,presencePenalty:q.schema,seed:R.schema.transform(e=>e===0?void 0:e),responseFormat:G.schema,toolChoice:B.schema}),le=t=>({temperature:x.def,maxTokens:S(t).def,stop:I.def,topP:A.def,frequencyPenalty:E.def,presencePenalty:q.def,seed:R.def,responseFormat:G.def,toolChoice:B.def});var o={base:t=>({def:le(t),schema:ie(t)})};var k="mixtral-8x7b-32768",qe="The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.",v=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:k,description:qe,maxInputTokens:32768,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),me=n,c=class extends s{constructor(e){super(v,e);}};var z="llama-guard-3-8b",ze="Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.",P=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:z,description:ze,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),re=n,_=class extends s{constructor(e){super(P,e);}};var U="llama3-8b-8192",De="The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.",F=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:U,description:De,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),pe=n,h=class extends s{constructor(e){super(F,e);}};var w="llama3-70b-8192",Qe="The Llama 3 instruction tuned models are optimized for dialogue 
use cases and outperform many of the available open source chat models on common industry benchmarks.",V=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:w,description:Qe,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),de=n,f=class extends s{constructor(e){super(V,e);}};var D="llama3-groq-70b-8192-tool-use-preview",eo="This is the 70B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling tasks.",j=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:D,description:eo,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),ce=n,b=class extends s{constructor(e){super(j,e);}};var $="llama3-groq-8b-8192-tool-use-preview",io="This is the 8B parameter version of the Llama 3 Groq Tool Use model, specifically designed for advanced tool use and function calling tasks.",N=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:$,description:io,maxInputTokens:8192,maxOutputTokens:4096,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(4096).def,schema:o.base(4096).schema}}),_e=n,M=class extends s{constructor(e){super(N,e);}};var K="llama-3.2-11b-vision-preview",_o="The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image. The models outperform many of the available open source and closed multimodal models on common industry benchmarks.",Y=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelModalitiesEnum).parse({name:K,description:_o,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),he=n,u=class extends s{constructor(e){super(Y,e);}};var Q="llama-3.2-3b-preview",Oo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.",X=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:Q,description:Oo,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),fe=n,O=class extends s{constructor(e){super(X,e);}};var Z="llama-3.2-1b-preview",xo="The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.",H=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:Z,description:xo,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),be=n,T=class extends s{constructor(e){super(H,e);}};var J="llama-3.1-70b-versatile",Ro="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",W=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:J,description:Ro,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),Me=n,L=class extends s{constructor(e){super(W,e);}};var ee="llama-3.1-8b-instant",Po="The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.",oe=ChatModelSchema(OpenAIChatModelRoles,OpenAIChatModelTextToolModalitiesEnum).parse({name:ee,description:Po,maxInputTokens:128e3,maxOutputTokens:8192,roles:OpenAIChatModelRolesMap,modalities:OpenAIChatModelTextToolModalities,config:{def:o.base(8192).def,schema:o.base(8192).schema}}),ue=n,y=class extends s{constructor(e){super(oe,e);}};var Uo="groq",d=class{constructor(){this.version="v1";this.name=Uo;this.chatModelFactories={[z]:{model:_,modelOptions:re,modelSchema:P},[U]:{model:h,modelOptions:pe,modelSchema:F},[w]:{model:f,modelOptions:de,modelSchema:V},[ee]:{model:y,modelOptions:ue,modelSchema:oe},[$]:{model:M,modelOptions:_e,modelSchema:N},[J]:{model:L,modelOptions:Me,modelSchema:W},[D]:{model:b,modelOptions:ce,modelSchema:j},[K]:{model:u,modelOptions:he,modelSchema:Y},[Q]:{model:O,modelOptions:fe,modelSchema:X},[Z]:{model:T,modelOptions:be,modelSchema:H},[k]:{model:c,modelOptions:me,modelSchema:v}};this.embeddingModelFactories={};}chatModelLiterals(){return Object.keys(this.chatModelFactories)}chatModel(e,a){if(!(e in this.chatModelFactories))throw new ProviderError({info:`Groq chat model: ${e} not found`,cause:new Error(`Groq chat model: ${e} not found, available chat models:
|
|
8
|
+
${this.chatModelLiterals().join(", ")}`)});let r=this.chatModelFactories[e].model,i=g({},a),m=this.chatModelFactories[e].modelOptions.parse(i);return new r(m)}chatModelSchema(e){if(!(e in this.chatModelFactories))throw new ProviderError({info:`Groq chat model: ${e} not found`,cause:new Error(`Groq chat model: ${e} not found, available chat models:
|
|
9
|
+
${this.chatModelLiterals().join(", ")}`)});return this.chatModelFactories[e].modelSchema}chatModelSchemas(){return Object.keys(this.chatModelFactories).reduce((e,a)=>(e[a]=this.chatModelFactories[a].modelSchema,e),{})}embeddingModelLiterals(){return Object.keys(this.embeddingModelFactories)}embeddingModel(e,a){if(!(e in this.embeddingModelFactories))throw new ProviderError({info:`Groq embedding model: ${e} not found`,cause:new Error(`Groq embedding model: ${e} not found, available embedding models:
|
|
10
|
+
${this.embeddingModelLiterals().join(", ")}`)});let r=this.embeddingModelFactories[e].model,i=g({},a),m=this.embeddingModelFactories[e].modelOptions.parse(i);return new r(m)}embeddingModelSchema(e){if(!(e in this.embeddingModelFactories))throw new ProviderError({info:`Groq embedding model: ${e} not found`,cause:new Error(`Groq embedding model: ${e} not found, available embedding models:
|
|
11
|
+
${this.embeddingModelLiterals().join(", ")}`)});return this.embeddingModelFactories[e].modelSchema}embeddingModelSchemas(){return Object.keys(this.embeddingModelFactories).reduce((e,a)=>(e[a]=this.embeddingModelFactories[a].modelSchema,e),{})}};d.baseUrl="https://api.groq.com/openai/v1";
|
|
10
12
|
|
|
11
|
-
export {
|
|
13
|
+
export { s as BaseChatModelGroq, n as BaseChatModelOptions, d as Groq, _ as LlamaGuard_3_8b, z as LlamaGuard_3_8bLiteral, re as LlamaGuard_3_8bOptions, P as LlamaGuard_3_8bSchema, L as Llama_3_1_70b, J as Llama_3_1_70bLiteral, W as Llama_3_1_70bSchema, Me as Llama_3_1_70b_Options, y as Llama_3_1_8b, ee as Llama_3_1_8bLiteral, oe as Llama_3_1_8bSchema, ue as Llama_3_1_8b_Options, u as Llama_3_2_11b_Vision, K as Llama_3_2_11b_VisionLiteral, he as Llama_3_2_11b_VisionOptions, Y as Llama_3_2_11b_VisionSchema, T as Llama_3_2_1b, Z as Llama_3_2_1bLiteral, H as Llama_3_2_1bSchema, be as Llama_3_2_1b_Options, O as Llama_3_2_3b, Q as Llama_3_2_3bLiteral, X as Llama_3_2_3bSchema, fe as Llama_3_2_3b_Options, f as Llama_3_70b, w as Llama_3_70bLiteral, de as Llama_3_70bOptions, V as Llama_3_70bSchema, b as Llama_3_70b_Tool_Use, D as Llama_3_70b_Tool_UseLiteral, j as Llama_3_70b_Tool_UseSchema, ce as Llama_3_70b_Tool_Use_Options, h as Llama_3_8b, U as Llama_3_8bLiteral, pe as Llama_3_8bOptions, F as Llama_3_8bSchema, M as Llama_3_8b_Tool_Use, $ as Llama_3_8b_Tool_UseLiteral, N as Llama_3_8b_Tool_UseSchema, _e as Llama_3_8b_Tool_Use_Options, c as Mixtral_8x7b, k as Mixtral_8x7bLiteral, me as Mixtral_8x7bOptions, v as Mixtral_8x7bSchema };
|
|
12
14
|
//# sourceMappingURL=index.mjs.map
|
|
13
15
|
//# sourceMappingURL=index.mjs.map
|
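The main runtime change in this release is the transformMessages override added to BaseChatModelGroq (new lines 5-7 of index.mjs above): the method first delegates to the OpenAI base transform, then joins any content-part arrays on system and assistant messages into single newline-separated strings, presumably so the Groq endpoint receives plain string content for those roles. A de-minified sketch of that post-processing step, read off the minified bundle rather than the published source (the standalone function name and the two local types below are hypothetical stand-ins):

// Hypothetical standalone rendering of the post-processing done inside
// BaseChatModelGroq.transformMessages after super.transformMessages(...) has run.
type ContentBlock = { text: string };
type TransformedMessage = { role: string; content: string | ContentBlock[] };

function flattenSystemAndAssistantContent(messages: TransformedMessage[]): TransformedMessage[] {
  for (const message of messages) {
    if (message.role === "system" && typeof message.content !== "string") {
      // System content blocks are collapsed into one newline-joined string.
      message.content = message.content.map((block) => block.text).join("\n");
    } else if (message.role === "assistant" && message.content && typeof message.content !== "string") {
      // Assistant content is only collapsed when it is present and not already a string.
      message.content = message.content.map((block) => block.text).join("\n");
    }
  }
  return messages;
}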
package/dist/index.mjs.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/provider/provider.groq.ts","../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/mixtral-8x7b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-70b-tool-use.groq.ts","../src/models/chat-models/llama-3-8b-tool-use.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-1-70b.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Mixtral_8x7bLiteral","Mixtral_8x7bDescription","Mixtral_8x7bSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Mixtral_8x7bOptions","Mixtral_8x7b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_70b_Tool_UseLiteral","Llama_3_70b_Tool_UseDescription","Llama_3_70b_Tool_UseSchema","Llama_3_70b_Tool_Use_Options","Llama_3_70b_Tool_Use","Llama_3_8b_Tool_UseLiteral","Llama_3_8b_Tool_UseDescription","Llama_3_8b_Tool_UseSchema","Llama_3_8b_Tool_Use_Options","Llama_3_8b_Tool_Use","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_1_70bLiteral","Llama_3_1_70bDescription","Llama_3_1_70bSchema","Llama_3_1_70b_Options","Llama_3_1_70b","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","ProviderLiteral","name","ProviderError","model","modelOptions","__spreadValues","acc","key"],"mappings":";;;;AAEA,IAAA,EAAA,CAAA,MAAA,CAAA,cAAA,CAAA,IAAA,EAAA,CAAA,MAAA,CAAA,qBAAA,CAAA,IAAA,EAAA,CAAA,MAAA,CAAA,SAAA,CAAA,cAAA,CAAA,EAAA,CAAA,MAAA,CAAA,SAAA,CAAA,oBAAA,CAAA,IAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,CAAA,IAAA,CAAA,CAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,UAAA,CAAA,CAAA,CAAA,CAAA,YAAA,CAAA,CAAA,CAAA,CAAA,QAAA,CAAA,CAAA,CAAA,CAAA,KAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,CAAA,IAAA,IAAA,CAAA,IAAA,CAAA,GAAA,CAAA,CAAA,EAAA,CAAA,CAAA,EAAA,CAAA,IAAA,CAAA,CAAA,CAAA,CAAA,CAAA,EAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,GAAA,EAAA,CAAA,IAAA,IAAA,CAAA,IAAA,EAAA,CAAA,CAAA,CAAA,CAAA,EAAA,CAAA,IAAA,CAAA,CAAA,CAA
A,CAAA,CAAA,EAAA,EAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,CAAA,OAAA,CAAA,CAAA,CCKMA,IAAAA,CAAAA,CAAuBC,GAAE,CAAA,MAAA,CAAO,CACpC,MAAQA,CAAAA,GAAAA,CAAE,MAAO,EAAA,CAAE,IAAI,CAAC,CAC1B,CAAC,CAAA,CAGKC,EAAN,cAAgCC,aAAc,CAM5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAgBN,CAAAA,CAAAA,CAAqB,MAAMK,CAAO,CAAA,CAClDE,CAAUC,CAAAA,CAAAA,CAAK,QACrB,KAAMJ,CAAAA,CAAAA,CAAa,CACjB,MAAA,CAAQE,EAAc,MACtB,CAAA,OAAA,CAASC,CACT,CAAA,eAAA,CAAiB,GAAGA,CAAO,CAAA,iBAAA,CAAA,CAC3B,aAAe,CAAA,CAAA,EAAGA,CAAO,CAC3B,iBAAA,CAAA,CAAC,CAbH,CAAA,IAAA,CAAS,QAAU,IAcjB,CAAA,IAAA,CAAK,WAAcH,CAAAA,CAAAA,CACnB,KAAK,UAAaE,CAAAA,CAAAA,CAAc,OAClC,CAGF,EC9BA,IAAMG,CAAAA,CAAcC,gBAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,YAAY,WAAY,CAAA,KAAA,CAC/B,WAAaA,CAAAA,WAAAA,CAAY,YAAY,WACrC,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKC,CAAaC,CAAAA,CAAAA,EACjBH,eAAgB,CAAA,CACd,MAAO,YACP,CAAA,KAAA,CAAOC,WAAY,CAAA,UAAA,CAAW,MAC9B,WAAaA,CAAAA,WAAAA,CAAY,WAAW,WACpC,CAAA,GAAA,CAAK,EACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,QAAS,CACX,CAAC,CAEGC,CAAAA,CAAAA,CAAOC,sBAAsB,CAC/B,KAAA,CAAO,MACP,CAAA,KAAA,CAAOJ,YAAY,IAAK,CAAA,CAAC,EAAE,KAC3B,CAAA,WAAA,CAAaA,YAAY,IAAK,CAAA,CAAC,CAAE,CAAA,WAAA,CACjC,IAAK,CACP,CAAC,CAEGK,CAAAA,CAAAA,CAAON,gBAAgB,CAC3B,KAAA,CAAO,OACP,CAAA,KAAA,CAAOC,YAAY,KAAM,CAAA,KAAA,CACzB,WAAaA,CAAAA,WAAAA,CAAY,MAAM,WAC/B,CAAA,GAAA,CAAK,CACL,CAAA,GAAA,CAAK,EACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKM,CAAAA,CAAAA,CAAmBP,eAAgB,CAAA,CACvC,MAAO,mBACP,CAAA,KAAA,CAAOC,YAAY,iBAAkB,CAAA,KAAA,CACrC,YAAaA,WAAY,CAAA,iBAAA,CAAkB,WAC3C,CAAA,GAAA,CAAK,GACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,QAAS,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,gBAAgB,CACtC,KAAA,CAAO,mBACP,KAAOC,CAAAA,WAAAA,CAAY,iBAAiB,KACpC,CAAA,WAAA,CAAaA,WAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKQ,CAAOT,CAAAA,eAAAA,CAAgB,CAC3B,KAAO,CAAA,MAAA,CACP,MAAOC,WAAY,CAAA,IAAA,CAAK,KACxB,CAAA,WAAA,CAAaA,YAAY,IAAK,CAAA,WAAA,CAC9B,GAAK,CAAA,CAAA,CACL,IAAK,GACL,CAAA,IAAA,CAAM,CACN,CAAA,OAAA,CAAS,CACX,CAAC,CAAA,CAEKS,CAAiBC,CAAAA,sBAAAA,CAAuB,CAC5C,KAAO,CAAA,iBAAA,CACP,KAAOV,CAAAA,WAAAA,CAAY,gBAAgB,KACnC,CAAA,WAAA,CAAaA,WAAY,CAAA,eAAA,CAAgB,YACzC,OAAS,CAAA,MAAA,CACT,OAAS,CAAA,CAAC,OAAQ,aAAa,CACjC,CAAC,CAEKW,CAAAA,CAAAA,CAAaD,uBAAuB,CACxC,KAAA,CAAO,aACP,CAAA,KAAA,CAAO,cACP,WACE,CAAA,+LAAA,CAGF,OAAS,CAAA,MAAA,CACT,QAAS,CAAC,MAAA,CAAQ,UAAY,CAAA,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,EAA6BV,CAAAA,CAAAA,EACjCZ,IAAE,MAAO,CAAA,CACP,WAAaQ,CAAAA,CAAAA,CAAY,OACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,OACtC,IAAMC,CAAAA,CAAAA,CAAK,MACX,CAAA,IAAA,CAAME,EAAK,MACX,CAAA,gBAAA,CAAkBC,EAAiB,MACnC,CAAA,eAAA,CAAiBC,EAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,UAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,WAAYE,CAAW,CAAA,MACzB,CAAC,CAAA,CAEGG,GAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,IACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,IACtC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,IAAA,CAAME,EAAK,GACX,CAAA,gBAAA,CAAkBC,EAAiB,GACnC,CAAA,eAAA,CAAiBC,EAAgB,GACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,GAAA,CACX,eAAgBC,CAAe,CAAA,GAAA,CAC/B,UAAYE,CAAAA,CAAAA,CAAW,GACzB,CCjCF,CAAA,CAAA,IAAMI,CAAuB,CAAA,CAC3B,KAAOb,CAA6B,GAAA,CAClC,IAAKY,EAAuBZ,CAAAA,CAAe,EAC3C,MAAQU,CAAAA,EAAAA,CAA0BV,CAAe,CACnD,EACF,CCIA,CAAA,IAAMc,CAAsB,CAAA,oBAAA,CAEtBC,GACJ,mGAEIC,CAAAA,CAAAA,CAAqBC,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAML,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOK,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB
,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKS,CAAAA,EAAAA,CAAsBnC,CAGtBoC,CAAAA,CAAAA,CAAN,cAA2BlC,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAkC,CAC5C,KAAMwB,CAAAA,CAAAA,CAAoBxB,CAAO,EACnC,CACF,MCzBMgC,CAAyB,CAAA,kBAAA,CAEzBC,GACJ,iGAEIC,CAAAA,CAAAA,CAAwBT,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC/G,IAAMK,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOL,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKc,CAAAA,EAAAA,CAAyBxC,EAGzByC,CAAN,CAAA,cAA8BvC,CAAkB,CAC9C,YAAYG,CAAqC,CAAA,CAC/C,KAAMkC,CAAAA,CAAAA,CAAuBlC,CAAO,EACtC,CACF,ECzBA,IAAMqC,CAAoB,CAAA,gBAAA,CAEpBC,GACJ,yKAGIC,CAAAA,CAAAA,CAAmBd,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC1G,IAAA,CAAMU,EACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,KACjB,KAAOV,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,EAEKmB,EAAoB7C,CAAAA,CAAAA,CAGpB8C,CAAN,CAAA,cAAyB5C,CAAkB,CACzC,WAAA,CAAYG,CAAgC,CAAA,CAC1C,MAAMuC,CAAkBvC,CAAAA,CAAO,EACjC,CACF,MC1BM0C,CAAqB,CAAA,iBAAA,CAErBC,EACJ,CAAA,yKAAA,CAGIC,EAAoBnB,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC3G,IAAMe,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOf,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKwB,CAAAA,EAAAA,CAAqBlD,EAGrBmD,CAAN,CAAA,cAA0BjD,CAAkB,CAC1C,YAAYG,CAAiC,CAAA,CAC3C,MAAM4C,CAAmB5C,CAAAA,CAAO,EAClC,CACF,EC1BA,IAAM+C,CAA8B,CAAA,uCAAA,CAE9BC,GACJ,iJAGIC,CAAAA,CAAAA,CAA6BxB,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CACpH,IAAA,CAAMoB,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOpB,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK6B,CAAAA,EAAAA,CAA+BvD,EAG/BwD,CAAN,CAAA,cAAmCtD,CAAkB,CACnD,WAAA,CAAYG,CAA2C,CAAA,CACrD,MAAMiD,CAA4BjD,CAAAA,CAAO,EAC3C,CACF,EC1BA,IAAMoD,EAA6B,sCAE7BC,CAAAA,EAAAA,CACJ,gJAGIC,CAAAA,CAAAA,CAA4B7B,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CACnH,IAAMyB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOzB,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKkC,CAAAA,EAAAA,CAA8B5D,EAG9B6D,CAAN,CAAA,cAAkC3D,CAAkB,CAClD,YAAYG,CAA0C,CAAA,CACpD,KAAMsD,CAAAA,CAAAA,CAA2BtD,CAAO,EAC1C,CACF,EC1BA,IAAMyD,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6BlC,gBAAgBC,oBAAsBkC,CAAAA,6BAA6B,EAAE,KAAM,CAAA,CAC5G,IAAMH,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAO9B,CAAAA,uBAAAA,CACP,UAAYiC,CAAAA,yBAAAA,CACZ,OAAQ,CACN,GAAA,CAAKxC,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAA8BnE,CAG9BoE,CAAAA,CAAAA,CAAN,cAAmClE,CAAkB,CACnD,YAAYG,CAA0C,CAAA,CACpD,MAAM2D,CAA4B3D,CAAAA,CAAO,EAC3C,CACF,EC3BA,IAAMgE,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqBzC,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAMqC,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOrC,CAAAA,uBAAAA,CACP,WAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,I
AAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK8C,CAAAA,EAAAA,CAAuBxE,EAGvByE,CAAN,CAAA,cAA2BvE,CAAkB,CAC3C,YAAYG,CAAmC,CAAA,CAC7C,KAAMkE,CAAAA,CAAAA,CAAoBlE,CAAO,EACnC,CACF,EC1BA,IAAMqE,CAAsB,CAAA,sBAAA,CAEtBC,GACJ,oQAGIC,CAAAA,CAAAA,CAAqB9C,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAM0C,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAO1C,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEKmD,EAAuB7E,CAAAA,CAAAA,CAGvB8E,CAAN,CAAA,cAA2B5E,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,MAAMuE,CAAoBvE,CAAAA,CAAO,EACnC,CACF,MC1BM0E,CAAuB,CAAA,yBAAA,CAEvBC,GACJ,6NAGIC,CAAAA,CAAAA,CAAsBnD,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC7G,IAAM+C,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO/C,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKwD,CAAAA,EAAAA,CAAwBlF,EAGxBmF,CAAN,CAAA,cAA4BjF,CAAkB,CAC5C,WAAA,CAAYG,EAAoC,CAC9C,KAAA,CAAM4E,CAAqB5E,CAAAA,CAAO,EACpC,CACF,EC1BA,IAAM+E,EAAsB,CAAA,sBAAA,CAEtBC,GACJ,6NAGIC,CAAAA,EAAAA,CAAqBxD,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMoD,GACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOpD,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,EAEK6D,EAAuBvF,CAAAA,CAAAA,CAGvBwF,CAAN,CAAA,cAA2BtF,CAAkB,CAC3C,WAAA,CAAYG,CAAmC,CAAA,CAC7C,MAAMiF,EAAoBjF,CAAAA,CAAO,EACnC,CACF,EfkBMoF,IAAAA,EAAAA,CAAkB,OAClBjF,CAAN,CAAA,KAAyF,CAAzF,WACE,EAAA,CAAA,IAAA,CAAS,OAAU,CAAA,IAAA,CACnB,KAAS,IAAOiF,CAAAA,EAAAA,CAGhB,IAAiB,CAAA,kBAAA,CAKV,CACH,CAACpD,CAAsB,EAAG,CACxB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACG,CAAiB,EAAG,CACnB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACG,CAAkB,EAAG,CACpB,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACmC,EAAmB,EAAG,CACrB,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,EACf,CACA,CAAA,CAAC7B,CAA0B,EAAG,CAC5B,MAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACoB,CAAoB,EAAG,CACtB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAAC7B,CAA2B,EAAG,CAC7B,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACQ,CAA2B,EAAG,CAC7B,KAAA,CAAOM,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaH,CAAAA,CACf,EACA,CAACK,CAAmB,EAAG,CACrB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACG,CAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,CACf,CAAA,CACA,CAACjD,CAAmB,EAAG,CACrB,KAAOS,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaN,CACf,CACF,CAEF,CAAA,IAAA,CAAiB,wBAKZ,GAAC,CAEN,iBAA8B,EAAA,CAC5B,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,kBAAkB,CAC5C,CAEA,SAAU6D,CAAAA,CAAAA,CAAcrF,CAAyB,CAAA,CAC/C,GAAI,EAAEqF,CAAAA,IAAQ,IAAK,CAAA,kBAAA,CAAA,CACjB,MAAM,IAAIC,aAAAA,CAAc,CACtB,IAAA,CAAM,oBAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,MAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAI,CAAA;AAAA,UACrC,EAAA,IAAA,CAAK,iBAAkB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,mBAAmBF,CAAI,CAAA,CAAE,KACtCG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChBzF,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,kBAAA,CAAmBoF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACnF,CAAA,OAAO,IAAID,CAAAA,CAAMtF,CAAa,CAChC,CAEA,eAAA,CAAgBoF,CAAmC,CAAA,CACjD,GAAI,EAAEA,CAAQ,IAAA,IA
AA,CAAK,oBACjB,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,iBAAA,EAAoBA,CAAI,CAAA;AAAA,UAAA,EACrC,IAAK,CAAA,iBAAA,EAAoB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,EAEH,OAAO,IAAA,CAAK,kBAAmBA,CAAAA,CAAI,EAAE,WACvC,CAEA,gBAAwD,EAAA,CACtD,OAAO,MAAA,CAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAE,CAAA,MAAA,CAAO,CAACK,CAAAA,CAAKC,KACvDD,CAAIC,CAAAA,CAAG,CAAI,CAAA,IAAA,CAAK,mBAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CACN,EAAyC,CAC9C,CAEA,wBAAmC,CACjC,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CACjD,CAEA,cAAA,CAAeL,EAAcrF,CAA8B,CAAA,CACzD,GAAI,EAAEqF,CAAQ,IAAA,IAAA,CAAK,uBACjB,CAAA,CAAA,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,yBAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,sBAAuB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,wBAAwBF,CAAI,CAAA,CAAE,KAC3CG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChBzF,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,uBAAA,CAAwBoF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACxF,CAAA,OAAO,IAAID,CAAAA,CAAMtF,CAAa,CAChC,CAEA,oBAAA,CAAqBoF,CAAwC,CAAA,CAC3D,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,yBACjB,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CAAA,UAAA,CAAA,CACnC,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,sBAAA,EAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,wBAAyB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAAA,CAEH,OAAO,IAAK,CAAA,uBAAA,CAAwBA,CAAI,CAAE,CAAA,WAC5C,CAEA,qBAAkE,EAAA,CAChE,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CAAA,CAAE,OAAO,CAACK,CAAAA,CAAKC,KAC5DD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GACN,EAA8C,CACnD,CACF,EAxJMvF,EAGY,OAAU,CAAA,gCAAA","file":"index.mjs","sourcesContent":["import { z } from \"zod\";\n\nimport { \n ChatModelV1, \n ChatModelSchemaType, \n EmbeddingModelV1, \n EmbeddingModelSchemaType, \n ProviderV1, \n ProviderError \n} from \"@adaline/provider\";\n\nimport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n} from \"./../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: \n Record<string, { \n model: { new (options: any): ChatModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: ChatModelSchemaType\n }> = {\n [LlamaGuard_3_8bLiteral]: {\n model: LlamaGuard_3_8b,\n modelOptions: LlamaGuard_3_8bOptions,\n modelSchema: LlamaGuard_3_8bSchema,\n 
},\n [Llama_3_8bLiteral]: {\n model: Llama_3_8b,\n modelOptions: Llama_3_8bOptions,\n modelSchema: Llama_3_8bSchema,\n },\n [Llama_3_70bLiteral]: {\n model: Llama_3_70b,\n modelOptions: Llama_3_70bOptions,\n modelSchema: Llama_3_70bSchema,\n },\n [Llama_3_1_8bLiteral]: {\n model: Llama_3_1_8b,\n modelOptions: Llama_3_1_8b_Options,\n modelSchema: Llama_3_1_8bSchema,\n },\n [Llama_3_8b_Tool_UseLiteral]: {\n model: Llama_3_8b_Tool_Use,\n modelOptions: Llama_3_8b_Tool_Use_Options,\n modelSchema: Llama_3_8b_Tool_UseSchema,\n },\n [Llama_3_1_70bLiteral]: {\n model: Llama_3_1_70b,\n modelOptions: Llama_3_1_70b_Options,\n modelSchema: Llama_3_1_70bSchema,\n },\n [Llama_3_70b_Tool_UseLiteral]: {\n model: Llama_3_70b_Tool_Use,\n modelOptions: Llama_3_70b_Tool_Use_Options,\n modelSchema: Llama_3_70b_Tool_UseSchema,\n },\n [Llama_3_2_11b_VisionLiteral]: {\n model: Llama_3_2_11b_Vision,\n modelOptions: Llama_3_2_11b_VisionOptions,\n modelSchema: Llama_3_2_11b_VisionSchema,\n },\n [Llama_3_2_3bLiteral]: {\n model: Llama_3_2_3b,\n modelOptions: Llama_3_2_3b_Options,\n modelSchema: Llama_3_2_3bSchema,\n },\n [Llama_3_2_1bLiteral]: {\n model: Llama_3_2_1b,\n modelOptions: Llama_3_2_1b_Options,\n modelSchema: Llama_3_2_1bSchema,\n },\n [Mixtral_8x7bLiteral]: {\n model: Mixtral_8x7b,\n modelOptions: Mixtral_8x7bOptions,\n modelSchema: Mixtral_8x7bSchema,\n },\n };\n\n private readonly embeddingModelFactories: \n Record<string, { \n model: { new (options: any): EmbeddingModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: EmbeddingModelSchemaType\n }> = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModel(name: string, options: O): ChatModelV1 {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.chatModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n chatModelSchema(name: string): ChatModelSchemaType {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n return this.chatModelFactories[name].modelSchema;\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce((acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, ChatModelSchemaType>);\n }\n\n embeddingModelLiterals(): string[] {\n return Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModel(name: string, options: O): EmbeddingModelV1 {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.embeddingModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.embeddingModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n embeddingModelSchema(name: string): EmbeddingModelSchemaType 
{\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n return this.embeddingModelFactories[name].modelSchema;\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce((acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, EmbeddingModelSchemaType>);\n }\n}\n\nexport { Groq };","import { z } from \"zod\";\n\nimport { BaseChatModel } from \"@adaline/openai\";\nimport { ChatModelSchemaType } from \"@adaline/provider\";\n\nimport { Groq } from \"../../provider\";\n\nconst BaseChatModelOptions = z.object({\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n // TODO: streamChat needs it's own implementation, OpenAI stream transformer is skipping some chunks\n}\n\nexport { \n BaseChatModelGroq, \n BaseChatModelOptions, \n type BaseChatModelOptionsType,\n};\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n });\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n 
default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { \n frequencyPenalty, \n maxTokens, \n presencePenalty, \n seed, \n stop, \n temperature, \n toolChoice, \n topP, \n responseFormat \n};\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n responseFormat,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import {\n ChatModelBaseConfigDef,\n ChatModelBaseConfigSchema,\n} from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Mixtral_8x7bLiteral = \"mixtral-8x7b-32768\" as const;\n// https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1\nconst Mixtral_8x7bDescription = \n \"The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.\";\n\nconst Mixtral_8x7bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Mixtral_8x7bLiteral,\n description: Mixtral_8x7bDescription,\n maxInputTokens: 32768,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Mixtral_8x7bOptions = BaseChatModelOptions;\ntype Mixtral_8x7bOptionsType = z.infer<typeof Mixtral_8x7bOptions>;\n\nclass Mixtral_8x7b extends BaseChatModelGroq {\n constructor(options: Mixtral_8x7bOptionsType) {\n super(Mixtral_8x7bSchema, options);\n }\n}\n\nexport {\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n type Mixtral_8x7bOptionsType,\n}","import { z } from \"zod\";\n\nimport 
{ ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \n \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n type LlamaGuard_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n type Llama_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// 
https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n type Llama_3_70bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70b_Tool_UseLiteral = \"llama3-groq-70b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-70B-Tool-Use\nconst Llama_3_70b_Tool_UseDescription = \n \"This is the 70B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_70b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70b_Tool_UseLiteral,\n description: Llama_3_70b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_70b_Tool_Use_OptionsType = z.infer<typeof Llama_3_70b_Tool_Use_Options>;\n\nclass Llama_3_70b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_70b_Tool_Use_OptionsType) {\n super(Llama_3_70b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n type Llama_3_70b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8b_Tool_UseLiteral = \"llama3-groq-8b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-8B-Tool-Use\nconst Llama_3_8b_Tool_UseDescription = \n \"This is the 8B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst 
Llama_3_8b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8b_Tool_UseLiteral,\n description: Llama_3_8b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_8b_Tool_Use_OptionsType = z.infer<typeof Llama_3_8b_Tool_Use_Options>;\n\nclass Llama_3_8b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_8b_Tool_Use_OptionsType) {\n super(Llama_3_8b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n type Llama_3_8b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelModalities,\n OpenAIChatModelModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription = \n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. \\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n type Llama_3_2_3b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n type Llama_3_2_1b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_70bLiteral = \"llama-3.1-70b-versatile\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_70bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: 
Llama_3_1_70bLiteral,\n description: Llama_3_1_70bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_70b_Options = BaseChatModelOptions;\ntype Llama_3_1_70b_OptionsType = z.infer<typeof Llama_3_1_70b_Options>;\n\nclass Llama_3_1_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_70b_OptionsType) {\n super(Llama_3_1_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n type Llama_3_1_70b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n type Llama_3_1_8b_OptionsType,\n}"]}
|
|
1
|
+
{"version":3,"sources":["../src/models/chat-models/base-chat-model.groq.ts","../src/configs/chat-model/common.config.chat-model.groq.ts","../src/configs/chat-model/base.config.chat-model.groq.ts","../src/configs/configs.groq.ts","../src/models/chat-models/mixtral-8x7b.groq.ts","../src/models/chat-models/llama-guard-3-8b.groq.ts","../src/models/chat-models/llama-3-8b.groq.ts","../src/models/chat-models/llama-3-70b.groq.ts","../src/models/chat-models/llama-3-70b-tool-use.groq.ts","../src/models/chat-models/llama-3-8b-tool-use.groq.ts","../src/models/chat-models/llama-3-2-11b-vision.groq.ts","../src/models/chat-models/llama-3-2-3b.groq.ts","../src/models/chat-models/llama-3-2-1b.groq.ts","../src/models/chat-models/llama-3-1-70b.groq.ts","../src/models/chat-models/llama-3-1-8b.groq.ts","../src/provider/provider.groq.ts"],"names":["BaseChatModelOptions","z","BaseChatModelGroq","BaseChatModel","modelSchema","options","parsedOptions","baseUrl","Groq","messages","transformedMessages","message","content","temperature","RangeConfigItem","CHAT_CONFIG","maxTokens","maxOutputTokens","stop","MultiStringConfigItem","topP","frequencyPenalty","presencePenalty","seed","responseFormat","SelectStringConfigItem","toolChoice","ChatModelBaseConfigSchema","value","ChatModelBaseConfigDef","GroqChatModelConfigs","Mixtral_8x7bLiteral","Mixtral_8x7bDescription","Mixtral_8x7bSchema","ChatModelSchema","OpenAIChatModelRoles","OpenAIChatModelTextToolModalitiesEnum","OpenAIChatModelRolesMap","OpenAIChatModelTextToolModalities","Mixtral_8x7bOptions","Mixtral_8x7b","LlamaGuard_3_8bLiteral","LlamaGuard_3_8bDescription","LlamaGuard_3_8bSchema","LlamaGuard_3_8bOptions","LlamaGuard_3_8b","Llama_3_8bLiteral","Llama_3_8bDescription","Llama_3_8bSchema","Llama_3_8bOptions","Llama_3_8b","Llama_3_70bLiteral","Llama_3_70bDescription","Llama_3_70bSchema","Llama_3_70bOptions","Llama_3_70b","Llama_3_70b_Tool_UseLiteral","Llama_3_70b_Tool_UseDescription","Llama_3_70b_Tool_UseSchema","Llama_3_70b_Tool_Use_Options","Llama_3_70b_Tool_Use","Llama_3_8b_Tool_UseLiteral","Llama_3_8b_Tool_UseDescription","Llama_3_8b_Tool_UseSchema","Llama_3_8b_Tool_Use_Options","Llama_3_8b_Tool_Use","Llama_3_2_11b_VisionLiteral","Llama_3_2_11b_VisionDescription","Llama_3_2_11b_VisionSchema","OpenAIChatModelModalitiesEnum","OpenAIChatModelModalities","Llama_3_2_11b_VisionOptions","Llama_3_2_11b_Vision","Llama_3_2_3bLiteral","Llama_3_2_3bDescription","Llama_3_2_3bSchema","Llama_3_2_3b_Options","Llama_3_2_3b","Llama_3_2_1bLiteral","Llama_3_2_1bDescription","Llama_3_2_1bSchema","Llama_3_2_1b_Options","Llama_3_2_1b","Llama_3_1_70bLiteral","Llama_3_1_70bDescription","Llama_3_1_70bSchema","Llama_3_1_70b_Options","Llama_3_1_70b","Llama_3_1_8bLiteral","Llama_3_1_8bDescription","Llama_3_1_8bSchema","Llama_3_1_8b_Options","Llama_3_1_8b","ProviderLiteral","name","ProviderError","model","modelOptions","__spreadValues","acc","key"],"mappings":";;;;yWAQMA,CAAuBC,CAAAA,GAAAA,CAAE,OAAO,CACpC,MAAA,CAAQA,IAAE,MAAO,EAAA,CAAE,GAAI,CAAA,CAAC,CAC1B,CAAC,EAGKC,CAAN,CAAA,cAAgCC,aAAc,CAM5C,WAAA,CAAYC,EAAkCC,CAAmC,CAAA,CAC/E,IAAMC,CAAAA,CAAgBN,CAAqB,CAAA,KAAA,CAAMK,CAAO,CAClDE,CAAAA,CAAAA,CAAUC,CAAK,CAAA,OAAA,CACrB,KAAMJ,CAAAA,CAAAA,CAAa,CACjB,MAAQE,CAAAA,CAAAA,CAAc,MACtB,CAAA,OAAA,CAASC,CACT,CAAA,eAAA,CAAiB,GAAGA,CAAO,CAAA,iBAAA,CAAA,CAC3B,cAAe,CAAGA,EAAAA,CAAO,mBAC3B,CAAC,CAAA,CAbH,IAAS,CAAA,OAAA,CAAU,IAcjB,CAAA,IAAA,CAAK,YAAcH,CACnB,CAAA,IAAA,CAAK,WAAaE,CAAc,CAAA,OAClC,CAEA,iBAAkBG,CAAAA,CAAAA,CAAqC,CACrD,IAAMC,CAAsB,CAAA,KAAA,CAAM,kBAAkBD,CAAQ,CAAA,CAI5D,OAAAC,CAAoB,CAAA,QAAA,CAAS,QAASC
,CAAY,EAAA,CAC5CA,CAAQ,CAAA,IAAA,GAAS,QACf,CAAA,OAAOA,EAAQ,OAAY,EAAA,QAAA,GAC7BA,CAAQ,CAAA,OAAA,CAAUA,CAAQ,CAAA,OAAA,CAAQ,IAAKC,CAAYA,EAAAA,CAAAA,CAAQ,IAAI,CAAA,CAAE,IAAK,CAAA,CAAA;AAAA,CAAI,GAEnED,CAAQ,CAAA,IAAA,GAAS,aACtBA,CAAQ,CAAA,OAAA,EAAW,OAAOA,CAAQ,CAAA,OAAA,EAAY,WAChDA,CAAQ,CAAA,OAAA,CAAUA,EAAQ,OAAQ,CAAA,GAAA,CAAKC,GAAYA,CAAQ,CAAA,IAAI,EAAE,IAAK,CAAA,CAAA;AAAA,CAAI,CAGhF,EAAA,CAAC,CAEMF,CAAAA,CACT,CAIF,ECpDA,IAAMG,CAAcC,CAAAA,eAAAA,CAAgB,CAClC,KAAA,CAAO,aACP,CAAA,KAAA,CAAOC,WAAY,CAAA,WAAA,CAAY,MAC/B,WAAaA,CAAAA,WAAAA,CAAY,WAAY,CAAA,WAAA,CACrC,IAAK,CACL,CAAA,GAAA,CAAK,CACL,CAAA,IAAA,CAAM,IACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEKC,EAAaC,CACjBH,EAAAA,eAAAA,CAAgB,CACd,KAAA,CAAO,aACP,KAAOC,CAAAA,WAAAA,CAAY,UAAW,CAAA,KAAA,CAC9B,YAAaA,WAAY,CAAA,UAAA,CAAW,WACpC,CAAA,GAAA,CAAK,EACL,GAAKE,CAAAA,CAAAA,CACL,IAAM,CAAA,CAAA,CACN,OAAS,CAAA,CACX,CAAC,CAAA,CAEGC,EAAOC,qBAAsB,CAAA,CAC/B,KAAO,CAAA,MAAA,CACP,MAAOJ,WAAY,CAAA,IAAA,CAAK,CAAC,CAAA,CAAE,MAC3B,WAAaA,CAAAA,WAAAA,CAAY,IAAK,CAAA,CAAC,EAAE,WACjC,CAAA,GAAA,CAAK,CACP,CAAC,EAEGK,CAAON,CAAAA,eAAAA,CAAgB,CAC3B,KAAA,CAAO,QACP,KAAOC,CAAAA,WAAAA,CAAY,KAAM,CAAA,KAAA,CACzB,YAAaA,WAAY,CAAA,KAAA,CAAM,WAC/B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,CAAA,CACL,IAAM,CAAA,GAAA,CACN,QAAS,CACX,CAAC,CAEKM,CAAAA,CAAAA,CAAmBP,gBAAgB,CACvC,KAAA,CAAO,mBACP,CAAA,KAAA,CAAOC,YAAY,iBAAkB,CAAA,KAAA,CACrC,WAAaA,CAAAA,WAAAA,CAAY,kBAAkB,WAC3C,CAAA,GAAA,CAAK,CACL,CAAA,CAAA,GAAA,CAAK,EACL,IAAM,CAAA,GAAA,CACN,OAAS,CAAA,CACX,CAAC,CAEKO,CAAAA,CAAAA,CAAkBR,eAAgB,CAAA,CACtC,MAAO,kBACP,CAAA,KAAA,CAAOC,WAAY,CAAA,gBAAA,CAAiB,KACpC,CAAA,WAAA,CAAaA,WAAY,CAAA,gBAAA,CAAiB,YAC1C,GAAK,CAAA,CAAA,CAAA,CACL,GAAK,CAAA,CAAA,CACL,KAAM,GACN,CAAA,OAAA,CAAS,CACX,CAAC,EAEKQ,CAAOT,CAAAA,eAAAA,CAAgB,CAC3B,KAAA,CAAO,OACP,KAAOC,CAAAA,WAAAA,CAAY,IAAK,CAAA,KAAA,CACxB,YAAaA,WAAY,CAAA,IAAA,CAAK,WAC9B,CAAA,GAAA,CAAK,EACL,GAAK,CAAA,GAAA,CACL,IAAM,CAAA,CAAA,CACN,QAAS,CACX,CAAC,CAEKS,CAAAA,CAAAA,CAAiBC,uBAAuB,CAC5C,KAAA,CAAO,iBACP,CAAA,KAAA,CAAOV,YAAY,eAAgB,CAAA,KAAA,CACnC,WAAaA,CAAAA,WAAAA,CAAY,gBAAgB,WACzC,CAAA,OAAA,CAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAQ,CAAA,aAAa,CACjC,CAAC,EAEKW,CAAaD,CAAAA,sBAAAA,CAAuB,CACxC,KAAA,CAAO,cACP,KAAO,CAAA,aAAA,CACP,WACE,CAAA,+LAAA,CAGF,QAAS,MACT,CAAA,OAAA,CAAS,CAAC,MAAA,CAAQ,WAAY,MAAM,CACtC,CAAC,CAAA,CCzED,IAAME,EAAAA,CAA6BV,CACjChB,EAAAA,GAAAA,CAAE,OAAO,CACP,WAAA,CAAaY,CAAY,CAAA,MAAA,CACzB,UAAWG,CAAUC,CAAAA,CAAe,CAAE,CAAA,MAAA,CACtC,KAAMC,CAAK,CAAA,MAAA,CACX,IAAME,CAAAA,CAAAA,CAAK,OACX,gBAAkBC,CAAAA,CAAAA,CAAiB,MACnC,CAAA,eAAA,CAAiBC,EAAgB,MACjC,CAAA,IAAA,CAAMC,CAAK,CAAA,MAAA,CAAO,UAAWK,CAAWA,EAAAA,CAAAA,GAAU,CAAI,CAAA,KAAA,CAAA,CAAYA,CAAM,CACxE,CAAA,cAAA,CAAgBJ,CAAe,CAAA,MAAA,CAC/B,WAAYE,CAAW,CAAA,MACzB,CAAC,CAAA,CAEGG,GAA0BZ,CAC7B,GAAA,CACC,WAAaJ,CAAAA,CAAAA,CAAY,IACzB,SAAWG,CAAAA,CAAAA,CAAUC,CAAe,CAAA,CAAE,IACtC,IAAMC,CAAAA,CAAAA,CAAK,GACX,CAAA,IAAA,CAAME,EAAK,GACX,CAAA,gBAAA,CAAkBC,CAAiB,CAAA,GAAA,CACnC,gBAAiBC,CAAgB,CAAA,GAAA,CACjC,IAAMC,CAAAA,CAAAA,CAAK,IACX,cAAgBC,CAAAA,CAAAA,CAAe,GAC/B,CAAA,UAAA,CAAYE,EAAW,GACzB,CAAA,CAAA,CCjCF,IAAMI,CAAAA,CAAuB,CAC3B,IAAA,CAAOb,CAA6B,GAAA,CAClC,IAAKY,EAAuBZ,CAAAA,CAAe,CAC3C,CAAA,MAAA,CAAQU,GAA0BV,CAAe,CACnD,CACF,CAAA,CAAA,KCIMc,CAAsB,CAAA,oBAAA,CAEtBC,EACJ,CAAA,mGAAA,CAEIC,EAAqBC,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,MAAM,CAC5G,IAAA,CAAML,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,KAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAOK,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKS,CAAAA,EAAAA,CAAsBvC,CAGtBwC,CAAAA,CAAAA,CAAN,cAA2BtC,CAAkB,CAC3C,WAAYG,CAA
AA,CAAAA,CAAkC,CAC5C,KAAM4B,CAAAA,CAAAA,CAAoB5B,CAAO,EACnC,CACF,ECzBA,IAAMoC,CAAyB,CAAA,kBAAA,CAEzBC,GACJ,iGAEIC,CAAAA,CAAAA,CAAwBT,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC/G,IAAA,CAAMK,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,IAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAOL,uBACP,CAAA,UAAA,CAAYC,kCACZ,MAAQ,CAAA,CACN,GAAKR,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKc,EAAyB5C,CAAAA,CAAAA,CAGzB6C,EAAN,cAA8B3C,CAAkB,CAC9C,WAAA,CAAYG,EAAqC,CAC/C,KAAA,CAAMsC,CAAuBtC,CAAAA,CAAO,EACtC,CACF,ECzBA,IAAMyC,CAAoB,CAAA,gBAAA,CAEpBC,EACJ,CAAA,yKAAA,CAGIC,CAAmBd,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC1G,KAAMU,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,eAAiB,CAAA,IAAA,CACjB,KAAOV,CAAAA,uBAAAA,CACP,WAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEKmB,CAAAA,EAAAA,CAAoBjD,EAGpBkD,CAAN,CAAA,cAAyBhD,CAAkB,CACzC,YAAYG,CAAgC,CAAA,CAC1C,KAAM2C,CAAAA,CAAAA,CAAkB3C,CAAO,EACjC,CACF,EC1BM8C,IAAAA,CAAAA,CAAqB,kBAErBC,EACJ,CAAA,yKAAA,CAGIC,CAAoBnB,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC3G,IAAMe,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOf,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEKwB,GAAqBtD,CAGrBuD,CAAAA,CAAAA,CAAN,cAA0BrD,CAAkB,CAC1C,WAAYG,CAAAA,CAAAA,CAAiC,CAC3C,KAAA,CAAMgD,EAAmBhD,CAAO,EAClC,CACF,EC1BA,IAAMmD,EAA8B,uCAE9BC,CAAAA,EAAAA,CACJ,iJAGIC,CAAAA,CAAAA,CAA6BxB,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CACpH,IAAMoB,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,IAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAOpB,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEK6B,EAA+B3D,CAAAA,CAAAA,CAG/B4D,CAAN,CAAA,cAAmC1D,CAAkB,CACnD,WAAA,CAAYG,CAA2C,CAAA,CACrD,MAAMqD,CAA4BrD,CAAAA,CAAO,EAC3C,CACF,EC1BMwD,IAAAA,CAAAA,CAA6B,sCAE7BC,CAAAA,EAAAA,CACJ,iJAGIC,CAA4B7B,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,EAAE,KAAM,CAAA,CACnH,IAAMyB,CAAAA,CAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,IAChB,CAAA,eAAA,CAAiB,KACjB,KAAOzB,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAAA,CAEKkC,EAA8BhE,CAAAA,CAAAA,CAG9BiE,EAAN,cAAkC/D,CAAkB,CAClD,WAAA,CAAYG,EAA0C,CACpD,KAAA,CAAM0D,CAA2B1D,CAAAA,CAAO,EAC1C,CACF,EC1BA,IAAM6D,CAA8B,CAAA,8BAAA,CAE9BC,GACJ,0RAIIC,CAAAA,CAAAA,CAA6BlC,eAAgBC,CAAAA,oBAAAA,CAAsBkC,6BAA6B,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAA,CAAMH,EACN,WAAaC,CAAAA,EAAAA,CACb,cAAgB,CAAA,KAAA,CAChB,gBAAiB,IACjB,CAAA,KAAA,CAAO9B,uBACP,CAAA,UAAA,CAAYiC,0BACZ,MAAQ,CAAA,CACN,GAAKxC,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,GACrC,CAAA,MAAA,CAAQA,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,CAEKyC,CAAAA,EAAAA,CAA8BvE,EAG9BwE,CAAN,CAAA,cAAmCtE,CAAkB,CACnD,YAAYG,CAA0C,CAAA,CACpD,KAAM+D,CAAAA,CAAAA,CAA4B/D,CAAO,EAC3C,CACF,EC3BMoE,IAAAA,CAAAA,CAAsB,uBAEtBC,EACJ,CAAA,oQAAA,CAGIC,CAAqBzC,CAAAA,eAAAA,CAAgBC,qBAAsBC,qCAAqC,CAAA,CAAE,KAAM,CAAA,CAC5G,KAAMqC,CACN,CAAA,WAAA,CAAaC,EACb,CAAA,cAAA,CAAgB,MAChB,eAAiB,CAAA,IAAA,CACjB,KAAOrC,CAAAA,uBAAAA,CACP,WAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAA,CAAKR,EAAqB,IAAK,CAAA,IAAI,CAAE,CAAA,GAAA,CACrC,OAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK8C,CAAAA,EAAAA,CAAuB5E,EAGvB6E,CAAN,CAAA,cAA2B3E,CAAkB,CAC3C,WAAYG,CAA
AA,CAAAA,CAAmC,CAC7C,KAAA,CAAMsE,EAAoBtE,CAAO,EACnC,CACF,EC1BA,IAAMyE,EAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,oQAGIC,CAAAA,CAAAA,CAAqB9C,gBAAgBC,oBAAsBC,CAAAA,qCAAqC,CAAE,CAAA,KAAA,CAAM,CAC5G,IAAM0C,CAAAA,CAAAA,CACN,WAAaC,CAAAA,EAAAA,CACb,eAAgB,KAChB,CAAA,eAAA,CAAiB,IACjB,CAAA,KAAA,CAAO1C,wBACP,UAAYC,CAAAA,iCAAAA,CACZ,MAAQ,CAAA,CACN,IAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAA,CAAE,IACrC,MAAQA,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,MAC1C,CACF,CAAC,CAAA,CAEKmD,GAAuBjF,CAGvBkF,CAAAA,CAAAA,CAAN,cAA2BhF,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAA,CAAM2E,EAAoB3E,CAAO,EACnC,CACF,MC1BM8E,CAAuB,CAAA,yBAAA,CAEvBC,EACJ,CAAA,6NAAA,CAGIC,EAAsBnD,eAAgBC,CAAAA,oBAAAA,CAAsBC,qCAAqC,CAAA,CAAE,MAAM,CAC7G,IAAA,CAAM+C,CACN,CAAA,WAAA,CAAaC,GACb,cAAgB,CAAA,KAAA,CAChB,eAAiB,CAAA,IAAA,CACjB,MAAO/C,uBACP,CAAA,UAAA,CAAYC,iCACZ,CAAA,MAAA,CAAQ,CACN,GAAKR,CAAAA,CAAAA,CAAqB,IAAK,CAAA,IAAI,EAAE,GACrC,CAAA,MAAA,CAAQA,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,MAC1C,CACF,CAAC,EAEKwD,EAAwBtF,CAAAA,CAAAA,CAGxBuF,CAAN,CAAA,cAA4BrF,CAAkB,CAC5C,WAAA,CAAYG,CAAoC,CAAA,CAC9C,MAAMgF,CAAqBhF,CAAAA,CAAO,EACpC,CACF,EC1BMmF,IAAAA,EAAAA,CAAsB,sBAEtBC,CAAAA,EAAAA,CACJ,8NAGIC,EAAqBxD,CAAAA,eAAAA,CAAgBC,oBAAsBC,CAAAA,qCAAqC,EAAE,KAAM,CAAA,CAC5G,IAAMoD,CAAAA,EAAAA,CACN,YAAaC,EACb,CAAA,cAAA,CAAgB,KAChB,CAAA,eAAA,CAAiB,KACjB,KAAOpD,CAAAA,uBAAAA,CACP,UAAYC,CAAAA,iCAAAA,CACZ,OAAQ,CACN,GAAA,CAAKR,CAAqB,CAAA,IAAA,CAAK,IAAI,CAAE,CAAA,GAAA,CACrC,MAAQA,CAAAA,CAAAA,CAAqB,KAAK,IAAI,CAAA,CAAE,MAC1C,CACF,CAAC,CAEK6D,CAAAA,EAAAA,CAAuB3F,CAGvB4F,CAAAA,CAAAA,CAAN,cAA2B1F,CAAkB,CAC3C,WAAYG,CAAAA,CAAAA,CAAmC,CAC7C,KAAMqF,CAAAA,EAAAA,CAAoBrF,CAAO,EACnC,CACF,ECkBA,IAAMwF,EAAkB,CAAA,MAAA,CAClBrF,EAAN,KAAyF,CAAzF,WACE,EAAA,CAAA,IAAA,CAAS,QAAU,IACnB,CAAA,IAAA,CAAS,IAAOqF,CAAAA,EAAAA,CAGhB,KAAiB,kBAKV,CAAA,CACH,CAACpD,CAAsB,EAAG,CACxB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACG,CAAiB,EAAG,CACnB,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaD,CAAAA,CACf,EACA,CAACG,CAAkB,EAAG,CACpB,MAAOI,CACP,CAAA,YAAA,CAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CACA,CAAA,CAACmC,EAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaD,EACf,CAAA,CACA,CAAC7B,CAA0B,EAAG,CAC5B,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACoB,CAAoB,EAAG,CACtB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAAC7B,CAA2B,EAAG,CAC7B,KAAA,CAAOI,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaD,CAAAA,CACf,EACA,CAACQ,CAA2B,EAAG,CAC7B,KAAOM,CAAAA,CAAAA,CACP,YAAcD,CAAAA,EAAAA,CACd,YAAaH,CACf,CAAA,CACA,CAACK,CAAmB,EAAG,CACrB,KAAA,CAAOI,CACP,CAAA,YAAA,CAAcD,GACd,WAAaD,CAAAA,CACf,CACA,CAAA,CAACG,CAAmB,EAAG,CACrB,KAAOI,CAAAA,CAAAA,CACP,aAAcD,EACd,CAAA,WAAA,CAAaD,CACf,CAAA,CACA,CAACjD,CAAmB,EAAG,CACrB,KAAA,CAAOS,EACP,YAAcD,CAAAA,EAAAA,CACd,WAAaN,CAAAA,CACf,CACF,CAEF,CAAA,IAAA,CAAiB,uBAKZ,CAAA,IAEL,iBAA8B,EAAA,CAC5B,OAAO,MAAA,CAAO,KAAK,IAAK,CAAA,kBAAkB,CAC5C,CAEA,UAAU6D,CAAczF,CAAAA,CAAAA,CAAyB,CAC/C,GAAI,EAAEyF,CAAQ,IAAA,IAAA,CAAK,kBACjB,CAAA,CAAA,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAC9B,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,oBAAoBA,CAAI,CAAA;AAAA,UACrC,EAAA,IAAA,CAAK,iBAAkB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,mBAAmBF,CAAI,CAAA,CAAE,KACtCG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChB7F,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,kBAAA,CAAmBwF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACnF,CAAA,OAAO,IAAID,CAAAA,CAAM1F,CAAa,CAChC,CAEA,eAAA,CAAgBwF,CAAmC,CAAA,CACjD,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,oBACjB,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,iBAAA,EAAoBD,CAAI,CAAA,UAAA,CAAA,CAC9B,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,iBAAA,EAAoBA,CAA
I,CAAA;AAAA,UAAA,EACrC,IAAK,CAAA,iBAAA,EAAoB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAC3C,CAAC,EAEH,OAAO,IAAA,CAAK,kBAAmBA,CAAAA,CAAI,EAAE,WACvC,CAEA,gBAAwD,EAAA,CACtD,OAAO,MAAA,CAAO,IAAK,CAAA,IAAA,CAAK,kBAAkB,CAAE,CAAA,MAAA,CAAO,CAACK,CAAAA,CAAKC,KACvDD,CAAIC,CAAAA,CAAG,CAAI,CAAA,IAAA,CAAK,mBAAmBA,CAAG,CAAA,CAAE,WACjCD,CAAAA,CAAAA,CAAAA,CACN,EAAyC,CAC9C,CAEA,wBAAmC,CACjC,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CACjD,CAEA,cAAA,CAAeL,EAAczF,CAA8B,CAAA,CACzD,GAAI,EAAEyF,CAAQ,IAAA,IAAA,CAAK,uBACjB,CAAA,CAAA,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CACnC,UAAA,CAAA,CAAA,KAAA,CAAO,IAAI,KAAA,CAAM,yBAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,sBAAuB,EAAA,CAAE,IAAK,CAAA,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAGH,CAAA,IAAME,CAAQ,CAAA,IAAA,CAAK,wBAAwBF,CAAI,CAAA,CAAE,KAC3CG,CAAAA,CAAAA,CAAeC,CAAA,CAAA,EAAA,CAChB7F,CAECC,CAAAA,CAAAA,CAAAA,CAAgB,IAAK,CAAA,uBAAA,CAAwBwF,CAAI,CAAA,CAAE,YAAa,CAAA,KAAA,CAAMG,CAAY,CACxF,CAAA,OAAO,IAAID,CAAAA,CAAM1F,CAAa,CAChC,CAEA,oBAAA,CAAqBwF,CAAwC,CAAA,CAC3D,GAAI,EAAEA,CAAQ,IAAA,IAAA,CAAK,yBACjB,MAAM,IAAIC,aAAc,CAAA,CACtB,IAAM,CAAA,CAAA,sBAAA,EAAyBD,CAAI,CAAA,UAAA,CAAA,CACnC,KAAO,CAAA,IAAI,KAAM,CAAA,CAAA,sBAAA,EAAyBA,CAAI,CAAA;AAAA,UAC1C,EAAA,IAAA,CAAK,wBAAyB,CAAA,IAAA,CAAK,IAAI,CAAC,CAAA,CAAE,CAChD,CAAC,CAAA,CAEH,OAAO,IAAK,CAAA,uBAAA,CAAwBA,CAAI,CAAE,CAAA,WAC5C,CAEA,qBAAkE,EAAA,CAChE,OAAO,MAAO,CAAA,IAAA,CAAK,KAAK,uBAAuB,CAAA,CAAE,OAAO,CAACK,CAAAA,CAAKC,KAC5DD,CAAIC,CAAAA,CAAG,EAAI,IAAK,CAAA,uBAAA,CAAwBA,CAAG,CAAE,CAAA,WAAA,CACtCD,GACN,EAA8C,CACnD,CACF,EAxJM3F,EAGY,OAAU,CAAA,gCAAA","file":"index.mjs","sourcesContent":["import { z } from \"zod\";\n\nimport { BaseChatModel, OpenAIChatRequestMessageType } from \"@adaline/openai\";\nimport { ChatModelSchemaType, ParamsType } from \"@adaline/provider\";\n\nimport { Groq } from \"../../provider\";\nimport { MessageType } from \"@adaline/types\";\n\nconst BaseChatModelOptions = z.object({\n apiKey: z.string().min(1),\n});\ntype BaseChatModelOptionsType = z.infer<typeof BaseChatModelOptions>;\n\nclass BaseChatModelGroq extends BaseChatModel {\n readonly version = \"v1\" as const;\n modelSchema: ChatModelSchemaType;\n\n private readonly groqApiKey: string;\n\n constructor(modelSchema: ChatModelSchemaType, options: BaseChatModelOptionsType) {\n const parsedOptions = BaseChatModelOptions.parse(options);\n const baseUrl = Groq.baseUrl;\n super(modelSchema, {\n apiKey: parsedOptions.apiKey,\n baseUrl: baseUrl,\n completeChatUrl: `${baseUrl}/chat/completions`,\n streamChatUrl: `${baseUrl}/chat/completions`,\n });\n this.modelSchema = modelSchema;\n this.groqApiKey = parsedOptions.apiKey;\n }\n\n transformMessages(messages: MessageType[]): ParamsType {\n const transformedMessages = super.transformMessages(messages) as { messages: OpenAIChatRequestMessageType[] };\n\n // Groq expects the content to be a string for system and assistant messages\n // OpenAI transformer takes care of validating role and modality \n transformedMessages.messages.forEach((message) => {\n if (message.role === \"system\") {\n if (typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n } else if (message.role === \"assistant\") {\n if (message.content && typeof message.content !== \"string\") {\n message.content = message.content.map((content) => content.text).join(\"\\n\");\n }\n }\n });\n\n return transformedMessages;\n }\n\n\n // TODO: streamChat needs it's own implementation, OpenAI stream transformer is skipping some chunks\n}\n\nexport { \n BaseChatModelGroq, \n 
BaseChatModelOptions, \n type BaseChatModelOptionsType,\n};\n","import { CHAT_CONFIG, MultiStringConfigItem, RangeConfigItem, SelectStringConfigItem } from \"@adaline/provider\";\n\nconst temperature = RangeConfigItem({\n param: \"temperature\",\n title: CHAT_CONFIG.TEMPERATURE.title,\n description: CHAT_CONFIG.TEMPERATURE.description,\n min: 0,\n max: 2,\n step: 0.01,\n default: 1,\n});\n\nconst maxTokens = (maxOutputTokens: number) =>\n RangeConfigItem({\n param: \"max_tokens\",\n title: CHAT_CONFIG.MAX_TOKENS.title,\n description: CHAT_CONFIG.MAX_TOKENS.description,\n min: 0,\n max: maxOutputTokens,\n step: 1,\n default: 0,\n });\n\nconst stop = MultiStringConfigItem({\n param: \"stop\",\n title: CHAT_CONFIG.STOP(4).title,\n description: CHAT_CONFIG.STOP(4).description,\n max: 4,\n });\n\nconst topP = RangeConfigItem({\n param: \"top_p\",\n title: CHAT_CONFIG.TOP_P.title,\n description: CHAT_CONFIG.TOP_P.description,\n min: 0,\n max: 1,\n step: 0.01,\n default: 1,\n});\n\nconst frequencyPenalty = RangeConfigItem({\n param: \"frequency_penalty\",\n title: CHAT_CONFIG.FREQUENCY_PENALTY.title,\n description: CHAT_CONFIG.FREQUENCY_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst presencePenalty = RangeConfigItem({\n param: \"presence_penalty\",\n title: CHAT_CONFIG.PRESENCE_PENALTY.title,\n description: CHAT_CONFIG.PRESENCE_PENALTY.description,\n min: -2,\n max: 2,\n step: 0.01,\n default: 0,\n});\n\nconst seed = RangeConfigItem({\n param: \"seed\",\n title: CHAT_CONFIG.SEED.title,\n description: CHAT_CONFIG.SEED.description,\n min: 0,\n max: 1000000,\n step: 1,\n default: 0,\n});\n\nconst responseFormat = SelectStringConfigItem({\n param: \"response_format\",\n title: CHAT_CONFIG.RESPONSE_FORMAT.title,\n description: CHAT_CONFIG.RESPONSE_FORMAT.description,\n default: \"text\",\n choices: [\"text\", \"json_object\"],\n});\n\nconst toolChoice = SelectStringConfigItem({\n param: \"tool_choice\",\n title: \"Tool choice\",\n description:\n \"Controls which (if any) tool is called by the model. \\\n 'none' means the model will not call a function. \\\n 'auto' means the model can pick between generating a message or calling a tool.\",\n default: \"auto\",\n choices: [\"auto\", \"required\", \"none\"],\n});\n\nexport { \n frequencyPenalty, \n maxTokens, \n presencePenalty, \n seed, \n stop, \n temperature, \n toolChoice, \n topP, \n responseFormat \n};\n","import { z } from \"zod\";\n\nimport {\n frequencyPenalty,\n maxTokens,\n presencePenalty,\n seed,\n stop,\n temperature,\n toolChoice,\n topP,\n responseFormat,\n} from \"./common.config.chat-model.groq\";\n\nconst ChatModelBaseConfigSchema = (maxOutputTokens: number) =>\n z.object({\n temperature: temperature.schema,\n maxTokens: maxTokens(maxOutputTokens).schema,\n stop: stop.schema,\n topP: topP.schema,\n frequencyPenalty: frequencyPenalty.schema,\n presencePenalty: presencePenalty.schema,\n seed: seed.schema.transform((value) => (value === 0 ? 
undefined : value)),\n responseFormat: responseFormat.schema,\n toolChoice: toolChoice.schema,\n });\n\nconst ChatModelBaseConfigDef = (maxOutputTokens: number) =>\n ({\n temperature: temperature.def,\n maxTokens: maxTokens(maxOutputTokens).def,\n stop: stop.def,\n topP: topP.def,\n frequencyPenalty: frequencyPenalty.def,\n presencePenalty: presencePenalty.def,\n seed: seed.def,\n responseFormat: responseFormat.def,\n toolChoice: toolChoice.def,\n }) as const;\n\nexport { ChatModelBaseConfigDef, ChatModelBaseConfigSchema };\n","import {\n ChatModelBaseConfigDef,\n ChatModelBaseConfigSchema,\n} from \"./chat-model\";\n\nconst GroqChatModelConfigs = {\n base: (maxOutputTokens: number) => ({\n def: ChatModelBaseConfigDef(maxOutputTokens),\n schema: ChatModelBaseConfigSchema(maxOutputTokens),\n }),\n} as const;\n\nexport { GroqChatModelConfigs };\n","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Mixtral_8x7bLiteral = \"mixtral-8x7b-32768\" as const;\n// https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1\nconst Mixtral_8x7bDescription = \n \"The Mixtral-8x7B Large Language Model (LLM) is a pretrained generative Sparse Mixture of Experts.\";\n\nconst Mixtral_8x7bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Mixtral_8x7bLiteral,\n description: Mixtral_8x7bDescription,\n maxInputTokens: 32768,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Mixtral_8x7bOptions = BaseChatModelOptions;\ntype Mixtral_8x7bOptionsType = z.infer<typeof Mixtral_8x7bOptions>;\n\nclass Mixtral_8x7b extends BaseChatModelGroq {\n constructor(options: Mixtral_8x7bOptionsType) {\n super(Mixtral_8x7bSchema, options);\n }\n}\n\nexport {\n Mixtral_8x7b,\n Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n type Mixtral_8x7bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst LlamaGuard_3_8bLiteral = \"llama-guard-3-8b\" as const;\n// https://huggingface.co/meta-llama/Llama-Guard-3-8B\nconst LlamaGuard_3_8bDescription = \n \"Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification.\";\n\nconst LlamaGuard_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: LlamaGuard_3_8bLiteral,\n description: LlamaGuard_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst LlamaGuard_3_8bOptions = BaseChatModelOptions;\ntype LlamaGuard_3_8bOptionsType = 
z.infer<typeof LlamaGuard_3_8bOptions>;\n\nclass LlamaGuard_3_8b extends BaseChatModelGroq {\n constructor(options: LlamaGuard_3_8bOptionsType) {\n super(LlamaGuard_3_8bSchema, options);\n }\n}\n\nexport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n type LlamaGuard_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8bLiteral = \"llama3-8b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct\nconst Llama_3_8bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8bLiteral,\n description: Llama_3_8bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8bOptions = BaseChatModelOptions;\ntype Llama_3_8bOptionsType = z.infer<typeof Llama_3_8bOptions>;\n\nclass Llama_3_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_8bOptionsType) {\n super(Llama_3_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n type Llama_3_8bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelTextToolModalitiesEnum, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70bLiteral = \"llama3-70b-8192\" as const;\n// https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct\nconst Llama_3_70bDescription = \n \"The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of \\\n the available open source chat models on common industry benchmarks.\";\n\nconst Llama_3_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70bLiteral,\n description: Llama_3_70bDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70bOptions = BaseChatModelOptions;\ntype Llama_3_70bOptionsType = z.infer<typeof Llama_3_70bOptions>;\n\nclass Llama_3_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_70bOptionsType) {\n super(Llama_3_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n type Llama_3_70bOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n 
OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_70b_Tool_UseLiteral = \"llama3-groq-70b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-70B-Tool-Use\nconst Llama_3_70b_Tool_UseDescription = \n \"This is the 70B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_70b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_70b_Tool_UseLiteral,\n description: Llama_3_70b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_70b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_70b_Tool_Use_OptionsType = z.infer<typeof Llama_3_70b_Tool_Use_Options>;\n\nclass Llama_3_70b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_70b_Tool_Use_OptionsType) {\n super(Llama_3_70b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n type Llama_3_70b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_8b_Tool_UseLiteral = \"llama3-groq-8b-8192-tool-use-preview\" as const;\n// https://huggingface.co/Groq/Llama-3-Groq-8B-Tool-Use\nconst Llama_3_8b_Tool_UseDescription = \n \"This is the 8B parameter version of the Llama 3 Groq Tool Use model, \\\n specifically designed for advanced tool use and function calling tasks.\";\n\nconst Llama_3_8b_Tool_UseSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_8b_Tool_UseLiteral,\n description: Llama_3_8b_Tool_UseDescription,\n maxInputTokens: 8192,\n maxOutputTokens: 4096,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(4096).def,\n schema: GroqChatModelConfigs.base(4096).schema,\n },\n});\n\nconst Llama_3_8b_Tool_Use_Options = BaseChatModelOptions;\ntype Llama_3_8b_Tool_Use_OptionsType = z.infer<typeof Llama_3_8b_Tool_Use_Options>;\n\nclass Llama_3_8b_Tool_Use extends BaseChatModelGroq {\n constructor(options: Llama_3_8b_Tool_Use_OptionsType) {\n super(Llama_3_8b_Tool_UseSchema, options);\n }\n}\n\nexport {\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n type Llama_3_8b_Tool_Use_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelModalities,\n OpenAIChatModelModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from 
\"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_11b_VisionLiteral = \"llama-3.2-11b-vision-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-11B-Vision\nconst Llama_3_2_11b_VisionDescription = \n \"The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, \\\n and answering general questions about an image. \\\n The models outperform many of the available open source and closed multimodal models on common industry benchmarks.\";\n\nconst Llama_3_2_11b_VisionSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelModalitiesEnum).parse({\n name: Llama_3_2_11b_VisionLiteral,\n description: Llama_3_2_11b_VisionDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_11b_VisionOptions = BaseChatModelOptions;\ntype Llama_3_2_11b_VisionOptionsType = z.infer<typeof Llama_3_2_11b_VisionOptions>;\n\nclass Llama_3_2_11b_Vision extends BaseChatModelGroq {\n constructor(options: Llama_3_2_11b_VisionOptionsType) {\n super(Llama_3_2_11b_VisionSchema, options);\n }\n}\n\nexport {\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n type Llama_3_2_11b_VisionOptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_3bLiteral = \"llama-3.2-3b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-3B\nconst Llama_3_2_3bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. 
They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_3bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_3bLiteral,\n description: Llama_3_2_3bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_3b_Options = BaseChatModelOptions;\ntype Llama_3_2_3b_OptionsType = z.infer<typeof Llama_3_2_3b_Options>;\n\nclass Llama_3_2_3b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_3b_OptionsType) {\n super(Llama_3_2_3bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n type Llama_3_2_3b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_2_1bLiteral = \"llama-3.2-1b-preview\" as const;\n// https://huggingface.co/meta-llama/Llama-3.2-1B\nconst Llama_3_2_1bDescription = \n \"The Llama 3.2 instruction-tuned text only models are optimized for multilingual dialogue use cases, including agentic retrieval and \\\n summarization tasks. They outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_2_1bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_2_1bLiteral,\n description: Llama_3_2_1bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_2_1b_Options = BaseChatModelOptions;\ntype Llama_3_2_1b_OptionsType = z.infer<typeof Llama_3_2_1b_Options>;\n\nclass Llama_3_2_1b extends BaseChatModelGroq {\n constructor(options: Llama_3_2_1b_OptionsType) {\n super(Llama_3_2_1bSchema, options);\n }\n}\n\nexport {\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n type Llama_3_2_1b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_70bLiteral = \"llama-3.1-70b-versatile\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_70bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_70bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: 
Llama_3_1_70bLiteral,\n description: Llama_3_1_70bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_70b_Options = BaseChatModelOptions;\ntype Llama_3_1_70b_OptionsType = z.infer<typeof Llama_3_1_70b_Options>;\n\nclass Llama_3_1_70b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_70b_OptionsType) {\n super(Llama_3_1_70bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n type Llama_3_1_70b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { ChatModelSchema } from \"@adaline/provider\";\n\nimport { \n OpenAIChatModelRoles, \n OpenAIChatModelRolesMap,\n OpenAIChatModelTextToolModalities,\n OpenAIChatModelTextToolModalitiesEnum,\n} from \"@adaline/openai\";\n\nimport { GroqChatModelConfigs } from \"../../configs\";\nimport { BaseChatModelGroq, BaseChatModelOptions } from \"./base-chat-model.groq\";\n\nconst Llama_3_1_8bLiteral = \"llama-3.1-8b-instant\" as const;\n// https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md\nconst Llama_3_1_8bDescription = \n \"The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and \\\n outperform many of the available open source and closed chat models on common industry benchmarks.\";\n\nconst Llama_3_1_8bSchema = ChatModelSchema(OpenAIChatModelRoles, OpenAIChatModelTextToolModalitiesEnum).parse({\n name: Llama_3_1_8bLiteral,\n description: Llama_3_1_8bDescription,\n maxInputTokens: 128000,\n maxOutputTokens: 8192,\n roles: OpenAIChatModelRolesMap,\n modalities: OpenAIChatModelTextToolModalities,\n config: {\n def: GroqChatModelConfigs.base(8192).def,\n schema: GroqChatModelConfigs.base(8192).schema,\n },\n});\n\nconst Llama_3_1_8b_Options = BaseChatModelOptions;\ntype Llama_3_1_8b_OptionsType = z.infer<typeof Llama_3_1_8b_Options>;\n\nclass Llama_3_1_8b extends BaseChatModelGroq {\n constructor(options: Llama_3_1_8b_OptionsType) {\n super(Llama_3_1_8bSchema, options);\n }\n}\n\nexport {\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n type Llama_3_1_8b_OptionsType,\n}","import { z } from \"zod\";\n\nimport { \n ChatModelV1, \n ChatModelSchemaType, \n EmbeddingModelV1, \n EmbeddingModelSchemaType, \n ProviderV1, \n ProviderError \n} from \"@adaline/provider\";\n\nimport {\n LlamaGuard_3_8b,\n LlamaGuard_3_8bOptions,\n LlamaGuard_3_8bSchema,\n LlamaGuard_3_8bLiteral,\n Llama_3_8b,\n Llama_3_8bOptions,\n Llama_3_8bSchema,\n Llama_3_8bLiteral,\n Llama_3_70b,\n Llama_3_70bOptions,\n Llama_3_70bSchema,\n Llama_3_70bLiteral,\n Llama_3_1_8b,\n Llama_3_1_8b_Options,\n Llama_3_1_8bSchema,\n Llama_3_1_8bLiteral,\n Llama_3_1_70b,\n Llama_3_1_70b_Options,\n Llama_3_1_70bSchema,\n Llama_3_1_70bLiteral,\n Llama_3_8b_Tool_Use,\n Llama_3_8b_Tool_Use_Options,\n Llama_3_8b_Tool_UseSchema,\n Llama_3_8b_Tool_UseLiteral,\n Llama_3_70b_Tool_Use,\n Llama_3_70b_Tool_Use_Options,\n Llama_3_70b_Tool_UseSchema,\n Llama_3_70b_Tool_UseLiteral,\n Llama_3_2_11b_Vision,\n Llama_3_2_11b_VisionOptions,\n Llama_3_2_11b_VisionSchema,\n Llama_3_2_11b_VisionLiteral,\n Llama_3_2_3b,\n Llama_3_2_3b_Options,\n Llama_3_2_3bSchema,\n Llama_3_2_3bLiteral,\n Llama_3_2_1b,\n Llama_3_2_1b_Options,\n Llama_3_2_1bSchema,\n Llama_3_2_1bLiteral,\n Mixtral_8x7b,\n 
Mixtral_8x7bOptions,\n Mixtral_8x7bSchema,\n Mixtral_8x7bLiteral,\n} from \"./../models\";\n\nconst ProviderLiteral = \"groq\";\nclass Groq<O extends Record<string, any> = Record<string, any>> implements ProviderV1<O> {\n readonly version = \"v1\" as const;\n readonly name = ProviderLiteral;\n static readonly baseUrl = \"https://api.groq.com/openai/v1\";\n\n private readonly chatModelFactories: \n Record<string, { \n model: { new (options: any): ChatModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: ChatModelSchemaType\n }> = {\n [LlamaGuard_3_8bLiteral]: {\n model: LlamaGuard_3_8b,\n modelOptions: LlamaGuard_3_8bOptions,\n modelSchema: LlamaGuard_3_8bSchema,\n },\n [Llama_3_8bLiteral]: {\n model: Llama_3_8b,\n modelOptions: Llama_3_8bOptions,\n modelSchema: Llama_3_8bSchema,\n },\n [Llama_3_70bLiteral]: {\n model: Llama_3_70b,\n modelOptions: Llama_3_70bOptions,\n modelSchema: Llama_3_70bSchema,\n },\n [Llama_3_1_8bLiteral]: {\n model: Llama_3_1_8b,\n modelOptions: Llama_3_1_8b_Options,\n modelSchema: Llama_3_1_8bSchema,\n },\n [Llama_3_8b_Tool_UseLiteral]: {\n model: Llama_3_8b_Tool_Use,\n modelOptions: Llama_3_8b_Tool_Use_Options,\n modelSchema: Llama_3_8b_Tool_UseSchema,\n },\n [Llama_3_1_70bLiteral]: {\n model: Llama_3_1_70b,\n modelOptions: Llama_3_1_70b_Options,\n modelSchema: Llama_3_1_70bSchema,\n },\n [Llama_3_70b_Tool_UseLiteral]: {\n model: Llama_3_70b_Tool_Use,\n modelOptions: Llama_3_70b_Tool_Use_Options,\n modelSchema: Llama_3_70b_Tool_UseSchema,\n },\n [Llama_3_2_11b_VisionLiteral]: {\n model: Llama_3_2_11b_Vision,\n modelOptions: Llama_3_2_11b_VisionOptions,\n modelSchema: Llama_3_2_11b_VisionSchema,\n },\n [Llama_3_2_3bLiteral]: {\n model: Llama_3_2_3b,\n modelOptions: Llama_3_2_3b_Options,\n modelSchema: Llama_3_2_3bSchema,\n },\n [Llama_3_2_1bLiteral]: {\n model: Llama_3_2_1b,\n modelOptions: Llama_3_2_1b_Options,\n modelSchema: Llama_3_2_1bSchema,\n },\n [Mixtral_8x7bLiteral]: {\n model: Mixtral_8x7b,\n modelOptions: Mixtral_8x7bOptions,\n modelSchema: Mixtral_8x7bSchema,\n },\n };\n\n private readonly embeddingModelFactories: \n Record<string, { \n model: { new (options: any): EmbeddingModelV1 },\n modelOptions: z.ZodType<any>,\n modelSchema: EmbeddingModelSchemaType\n }> = {};\n\n chatModelLiterals(): string[] {\n return Object.keys(this.chatModelFactories);\n }\n\n chatModel(name: string, options: O): ChatModelV1 {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.chatModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.chatModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n chatModelSchema(name: string): ChatModelSchemaType {\n if (!(name in this.chatModelFactories)) {\n throw new ProviderError({\n info: `Groq chat model: ${name} not found`,\n cause: new Error(`Groq chat model: ${name} not found, available chat models: \n ${this.chatModelLiterals().join(\", \")}`),\n });\n }\n return this.chatModelFactories[name].modelSchema;\n }\n\n chatModelSchemas(): Record<string, ChatModelSchemaType> {\n return Object.keys(this.chatModelFactories).reduce((acc, key) => {\n acc[key] = this.chatModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, ChatModelSchemaType>);\n }\n\n embeddingModelLiterals(): string[] {\n return 
Object.keys(this.embeddingModelFactories);\n }\n\n embeddingModel(name: string, options: O): EmbeddingModelV1 {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n\n const model = this.embeddingModelFactories[name].model;\n const modelOptions = {\n ...options,\n };\n const parsedOptions = this.embeddingModelFactories[name].modelOptions.parse(modelOptions);\n return new model(parsedOptions);\n }\n\n embeddingModelSchema(name: string): EmbeddingModelSchemaType {\n if (!(name in this.embeddingModelFactories)) {\n throw new ProviderError({\n info: `Groq embedding model: ${name} not found`,\n cause: new Error(`Groq embedding model: ${name} not found, available embedding models: \n ${this.embeddingModelLiterals().join(\", \")}`),\n });\n }\n return this.embeddingModelFactories[name].modelSchema;\n }\n\n embeddingModelSchemas(): Record<string, EmbeddingModelSchemaType> {\n return Object.keys(this.embeddingModelFactories).reduce((acc, key) => {\n acc[key] = this.embeddingModelFactories[key].modelSchema;\n return acc;\n }, {} as Record<string, EmbeddingModelSchemaType>);\n }\n}\n\nexport { Groq };"]}
|
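The embedded sources in the map above show the main behavioral addition in this release: BaseChatModelGroq now overrides transformMessages, delegating to the OpenAI transformer and then flattening multi-part system and assistant content into a single newline-joined string, since Groq's chat completions endpoint expects plain string content for those roles. The following is a minimal standalone sketch of that flattening step only; the ChatMessage shape is a simplified stand-in for the OpenAI request message type, not the package's exact type.

// Sketch of the content-flattening step introduced in BaseChatModelGroq.transformMessages.
// The ChatMessage shape is a simplified stand-in; only the fields the transform touches are modeled.
type ContentPart = { text: string };
type ChatMessage = {
  role: "system" | "user" | "assistant";
  content: string | ContentPart[];
};

function flattenForGroq(messages: ChatMessage[]): ChatMessage[] {
  return messages.map((message) => {
    // Groq expects string content for system and assistant messages,
    // so multi-part content is joined with newlines; user messages are left untouched.
    if (
      (message.role === "system" || message.role === "assistant") &&
      message.content &&
      typeof message.content !== "string"
    ) {
      return { ...message, content: message.content.map((part) => part.text).join("\n") };
    }
    return message;
  });
}

// Example: a two-part system prompt collapses into a single string.
const flattened = flattenForGroq([
  { role: "system", content: [{ text: "You are terse." }, { text: "Answer in English." }] },
  { role: "user", content: "Hello" },
]);
console.log(flattened[0].content); // "You are terse.\nAnswer in English."

Note that the shipped implementation mutates the transformed messages in place with forEach after calling super.transformMessages; the sketch uses a pure mapping function purely for clarity.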
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@adaline/groq",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.3.1",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"sideEffects": false,
|
|
6
6
|
"private": false,
|
|
@@ -48,7 +48,7 @@
|
|
|
48
48
|
"zod": "^3.23.8",
|
|
49
49
|
"@adaline/provider": "0.12.0",
|
|
50
50
|
"@adaline/types": "0.10.0",
|
|
51
|
-
"@adaline/openai": "0.
|
|
51
|
+
"@adaline/openai": "0.15.0"
|
|
52
52
|
},
|
|
53
53
|
"devDependencies": {
|
|
54
54
|
"tsup": "^8.2.2",
|
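For context on how these dependency bumps are consumed, the provider class embedded in the source map above exposes a factory API keyed by model literal. Below is a hedged usage sketch, assuming the package root re-exports the Groq class shown in provider.groq.ts; the model literal and API key are placeholders.

// Hypothetical usage sketch: assumes @adaline/groq re-exports the Groq provider class
// from provider.groq.ts; the model literal and API key below are placeholders.
import { Groq } from "@adaline/groq";

const provider = new Groq();

// Literals registered in the chat model factory map,
// e.g. "llama-3.1-8b-instant", "llama-3.2-3b-preview", "mixtral-8x7b-32768", ...
console.log(provider.chatModelLiterals());

// Options are validated with zod (apiKey must be a non-empty string);
// an unknown model name throws a ProviderError listing the available literals.
const model = provider.chatModel("llama-3.1-8b-instant", { apiKey: "YOUR_GROQ_API_KEY" });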