@ax-llm/ax 11.0.44 → 11.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +257 -35
- package/index.cjs.map +1 -1
- package/index.d.cts +65 -9
- package/index.d.ts +65 -9
- package/index.js +245 -29
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.js CHANGED
@@ -916,15 +916,16 @@ var AxBaseAI = class {
       ...this.aiImpl.getModelConfig(),
       ...req.modelConfig
     };
+    if (options?.thinkingTokenBudget && !this.getFeatures(model).hasThinkingBudget) {
+      throw new Error(
+        `Model ${model} does not support thinkingTokenBudget.`
+      );
+    }
     modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
     const canStream = this.getFeatures(model).streaming;
     if (!canStream) {
       modelConfig.stream = false;
     }
-    const canSetThinkingTokenBudget = this.getFeatures(model).thinkingTokenBudget;
-    if (!canSetThinkingTokenBudget && options?.thinkingTokenBudget) {
-      throw new Error("Thinking token budget is not supported for this model");
-    }
     if (this.tracer) {
       return await this.tracer?.startActiveSpan(
         "AI Chat Request",
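The budget check now keys off `getFeatures(model).hasThinkingBudget` and names the model in the error. A minimal sketch of a call that exercises this path; the constructor and option names come from this diff, while the env var and prompt are illustrative:

```typescript
import { AxAIOpenAI } from '@ax-llm/ax'

const ai = new AxAIOpenAI({ apiKey: process.env.OPENAI_APIKEY! })

// Throws `Model <name> does not support thinkingTokenBudget.` when the
// resolved model's features report hasThinkingBudget === false.
await ai.chat(
  { chatPrompt: [{ role: 'user', content: 'Hello' }] },
  { thinkingTokenBudget: 'low' }
)
```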
@@ -1022,6 +1023,13 @@ var AxBaseAI = class {
     const wrappedRespFn = (state) => (resp) => {
       const res2 = respFn(resp, state);
       res2.sessionId = options?.sessionId;
+      if (options?.hideThought) {
+        res2.results.forEach((result) => {
+          if (result.thought) {
+            result.thought = void 0;
+          }
+        });
+      }
       if (!res2.modelUsage) {
         res2.modelUsage = {
           ai: this.name,
@@ -1059,6 +1067,13 @@ var AxBaseAI = class {
     }
     const res = this.aiImpl.createChatResp(rv);
     res.sessionId = options?.sessionId;
+    if (options?.hideThought) {
+      res.results.forEach((result) => {
+        if (result.thought) {
+          result.thought = void 0;
+        }
+      });
+    }
     if (!res.modelUsage) {
       const tokenUsage = this.aiImpl.getTokenUsage();
       if (tokenUsage) {
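Both the streaming and the non-streaming response paths now honor a `hideThought` request option that clears `thought` on every result before it is returned. A hedged usage sketch (the option name comes from this diff; the rest is illustrative):

```typescript
import { AxAIOpenAI } from '@ax-llm/ax'

const ai = new AxAIOpenAI({ apiKey: process.env.OPENAI_APIKEY! })

// With hideThought set, result.thought is reset to undefined on each result,
// for streamed chunks as well as for the final response.
await ai.chat(
  { chatPrompt: [{ role: 'user', content: 'Plan a 3-day trip' }] },
  { hideThought: true }
)
```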
@@ -1846,25 +1861,29 @@ var axModelInfoOpenAI = [
     name: "o1" /* O1 */,
     currency: "usd",
     promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 60
+    completionTokenCostPer1M: 60,
+    hasThinkingBudget: true
   },
   {
     name: "o1-mini" /* O1Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 14.4
+    completionTokenCostPer1M: 14.4,
+    hasThinkingBudget: true
   },
   {
     name: "o3-mini" /* O3Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true
   },
   {
     name: "o4-mini" /* O4Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true
   },
   {
     name: "gpt-4" /* GPT4 */,
@@ -1954,9 +1973,10 @@ var axAIOpenAIFastConfig = () => ({
   model: "gpt-4.1-mini" /* GPT41Mini */
 });
 var AxAIOpenAIImpl = class {
-  constructor(config, streamingUsage) {
+  constructor(config, streamingUsage, chatReqUpdater) {
     this.config = config;
     this.streamingUsage = streamingUsage;
+    this.chatReqUpdater = chatReqUpdater;
   }
   tokensUsed;
   getTokenUsage() {
@@ -1976,7 +1996,7 @@ var AxAIOpenAIImpl = class {
       stream: config.stream
     };
   }
-  createChatReq(req,
+  createChatReq(req, config) {
     const model = req.model;
     if (!req.chatPrompt || req.chatPrompt.length === 0) {
       throw new Error("Chat prompt is empty");
@@ -1996,12 +2016,11 @@ var AxAIOpenAIImpl = class {
     const messages = createMessages2(req);
     const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
     const stream = req.modelConfig?.stream ?? this.config.stream;
-    const reasoningEffort = isReasoningModel(model) ? this.config.reasoningEffort : void 0;
     const store = this.config.store;
-
+    let reqValue = {
       model,
       messages,
-      response_format: this.config?.responseFormat ? { type: this.config
+      response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
       tools,
       tool_choice: toolsChoice,
       max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 500,
@@ -2013,9 +2032,55 @@ var AxAIOpenAIImpl = class {
       logit_bias: this.config.logitBias,
       ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
       ...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
-      ...reasoningEffort ? { reasoning_effort: reasoningEffort } : {},
       ...store ? { store } : {}
     };
+    if (this.config.reasoningEffort) {
+      reqValue.reasoning_effort = this.config.reasoningEffort;
+    }
+    if (this.config.webSearchOptions) {
+      reqValue.web_search_options = {
+        ...this.config.webSearchOptions.searchContextSize && {
+          search_context_size: this.config.webSearchOptions.searchContextSize
+        },
+        ...this.config.webSearchOptions.userLocation && {
+          user_location: {
+            approximate: {
+              type: "approximate",
+              ...this.config.webSearchOptions.userLocation.approximate.city && {
+                city: this.config.webSearchOptions.userLocation.approximate.city
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.country && {
+                country: this.config.webSearchOptions.userLocation.approximate.country
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.region && {
+                region: this.config.webSearchOptions.userLocation.approximate.region
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.timezone && {
+                timezone: this.config.webSearchOptions.userLocation.approximate.timezone
+              }
+            }
+          }
+        }
+      };
+    }
+    if (config.thinkingTokenBudget) {
+      switch (config.thinkingTokenBudget) {
+        case "minimal":
+          reqValue.reasoning_effort = "low";
+          break;
+        case "low":
+          reqValue.reasoning_effort = "medium";
+          break;
+        case "medium":
+          reqValue.reasoning_effort = "high";
+          break;
+        case "high":
+          reqValue.reasoning_effort = "high";
+      }
+    }
+    if (this.chatReqUpdater) {
+      reqValue = this.chatReqUpdater(reqValue);
+    }
     return [apiConfig, reqValue];
   }
   createEmbedReq(req) {
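For the OpenAI-compatible request builder, the abstract `thinkingTokenBudget` levels are translated into `reasoning_effort` values (there is no raw token budget on this API), and `webSearchOptions` is copied field by field into `web_search_options`. The effort mapping restated standalone; the type and function names below are illustrative, not package exports:

```typescript
type ThinkingLevel = 'minimal' | 'low' | 'medium' | 'high'

// Mirrors the switch above: each abstract level maps to an OpenAI
// reasoning_effort tier, with "medium" and "high" both landing on "high".
const toReasoningEffort = (level: ThinkingLevel): 'low' | 'medium' | 'high' => {
  switch (level) {
    case 'minimal':
      return 'low'
    case 'low':
      return 'medium'
    case 'medium':
      return 'high'
    case 'high':
      return 'high'
  }
}
```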
@@ -2058,6 +2123,7 @@ var AxAIOpenAIImpl = class {
     return {
       id: `${choice.index}`,
       content: choice.message.content,
+      thought: choice.message.reasoning_content,
       functionCalls,
       finishReason
     };
@@ -2080,7 +2146,12 @@ var AxAIOpenAIImpl = class {
     }
     const results = choices.map(
       ({
-        delta: {
+        delta: {
+          content,
+          role,
+          tool_calls: toolCalls,
+          reasoning_content: thought
+        },
         finish_reason: oaiFinishReason
       }) => {
         const finishReason = mapFinishReason2(oaiFinishReason);
@@ -2101,6 +2172,7 @@ var AxAIOpenAIImpl = class {
         return {
           content,
           role,
+          thought,
           functionCalls,
           finishReason,
           id
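On the parsing side, `reasoning_content` from both full and streamed OpenAI-style choices is now surfaced as `thought` on each result. A sketch of reading it (non-streaming for brevity; the `in` check narrows away the streaming return type):

```typescript
import { AxAIOpenAI } from '@ax-llm/ax'

const ai = new AxAIOpenAI({ apiKey: process.env.OPENAI_APIKEY! })
const res = await ai.chat(
  { chatPrompt: [{ role: 'user', content: 'What is 17 * 23?' }] },
  { stream: false }
)
if ('results' in res) {
  for (const result of res.results) {
    // thought is only set when the backend returned reasoning_content.
    console.log(result.thought ?? '(no reasoning returned)', '->', result.content)
  }
}
```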
@@ -2198,14 +2270,17 @@ var AxAIOpenAIBase = class extends AxBaseAI {
     options,
     apiURL,
     modelInfo,
-    models
+    models,
+    chatReqUpdater,
+    supportFor
   }) {
     if (!apiKey || apiKey === "") {
       throw new Error("OpenAI API key not set");
     }
     const aiImpl = new AxAIOpenAIImpl(
       config,
-      options?.streamingUsage ?? true
+      options?.streamingUsage ?? true,
+      chatReqUpdater
    );
     super(aiImpl, {
       name: "OpenAI",
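`AxAIOpenAIBase` now accepts a `chatReqUpdater` and hands it to `AxAIOpenAIImpl`, giving OpenAI-compatible providers a final hook over the outgoing request (applied above as `reqValue = this.chatReqUpdater(reqValue)`). The hook's exact TypeScript type is not visible in this diff; the sketch below infers a simple pass-through signature:

```typescript
// Assumed shape: the fully built OpenAI-style request body.
type OpenAIChatRequest = Record<string, unknown>

// Hypothetical updater: receives the request and must return it, optionally modified.
const chatReqUpdater = (req: OpenAIChatRequest): OpenAIChatRequest => ({
  ...req,
  // illustrative extra field; not something this diff itself adds
  user: 'ax-example'
})
```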
@@ -2217,16 +2292,19 @@ var AxAIOpenAIBase = class extends AxBaseAI {
         embedModel: config.embedModel
       },
       options,
-      supportFor: () => {
-
-
+      supportFor: supportFor ?? ((model) => {
+        const modelInf = modelInfo.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }),
       models
     });
   }
 };
-var isReasoningModel = (model) => ["o1-mini" /* O1Mini */, "o1" /* O1 */, "o3-mini" /* O3Mini */].includes(
-  model
-);
 var AxAIOpenAI = class extends AxAIOpenAIBase {
   constructor({
     apiKey,
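The previously hard-coded `supportFor` is replaced by a caller-supplied callback or a default that reads the new `hasThinkingBudget` / `hasShowThoughts` flags from `modelInfo`. The feature record it returns, per this diff (the interface name below is illustrative, not a package export):

```typescript
// Shape returned by the default supportFor(model) callback above.
interface ModelFeatures {
  functions: boolean
  streaming: boolean
  hasThinkingBudget: boolean
  hasShowThoughts: boolean
}
```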
@@ -2237,6 +2315,15 @@ var AxAIOpenAI = class extends AxAIOpenAIBase {
     if (!apiKey || apiKey === "") {
       throw new Error("OpenAI API key not set");
     }
+    const supportForFn = (model) => {
+      const modelInf = axModelInfoOpenAI.find((m) => m.name === model);
+      return {
+        functions: true,
+        streaming: true,
+        hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+        hasShowThoughts: modelInf?.hasShowThoughts ?? false
+      };
+    };
     super({
       apiKey,
       config: {
@@ -2245,7 +2332,8 @@ var AxAIOpenAI = class extends AxAIOpenAIBase {
       },
       options,
       modelInfo: axModelInfoOpenAI,
-      models
+      models,
+      supportFor: supportForFn
     });
     super.setName("OpenAI");
   }
@@ -2284,7 +2372,16 @@ var AxAIAzureOpenAI = class extends AxAIOpenAIBase {
       config: _config,
       options,
       models,
-      modelInfo: axModelInfoOpenAI
+      modelInfo: axModelInfoOpenAI,
+      supportFor: (model) => {
+        const modelInf = axModelInfoOpenAI.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }
     });
     const host = resourceName.includes("://") ? resourceName : `https://${resourceName}.openai.azure.com/`;
     super.setName("Azure OpenAI");
@@ -2743,14 +2840,18 @@ var axModelInfoGoogleGemini = [
     currency: "usd",
     characterIsToken: false,
     promptTokenCostPer1M: 2.5,
-    completionTokenCostPer1M: 15
+    completionTokenCostPer1M: 15,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
   },
   {
     name: "gemini-2.5-flash-preview-04-17" /* Gemini25Flash */,
     currency: "usd",
     characterIsToken: false,
     promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 3.5
+    completionTokenCostPer1M: 3.5,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
   },
   {
     name: "gemini-2.0-flash" /* Gemini20Flash */,
@@ -3011,7 +3112,20 @@ var AxAIGoogleGeminiImpl = class {
       thinkingConfig.thinkingBudget = this.config.thinking.thinkingTokenBudget;
     }
     if (config.thinkingTokenBudget) {
-
+      switch (config.thinkingTokenBudget) {
+        case "minimal":
+          thinkingConfig.thinkingBudget = 0;
+          break;
+        case "low":
+          thinkingConfig.thinkingBudget = 1024;
+          break;
+        case "medium":
+          thinkingConfig.thinkingBudget = 4096;
+          break;
+        case "high":
+          thinkingConfig.thinkingBudget = 8192;
+          break;
+      }
     }
     const generationConfig = {
       maxOutputTokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
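Gemini maps the same abstract levels to concrete `thinkingBudget` token counts rather than an effort label. Restated as a standalone table (the constant name is illustrative):

```typescript
// Token budgets applied to thinkingConfig.thinkingBudget, per the switch above.
const geminiThinkingBudget: Record<'minimal' | 'low' | 'medium' | 'high', number> = {
  minimal: 0,
  low: 1024,
  medium: 4096,
  high: 8192
}
```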
@@ -3212,7 +3326,16 @@ var AxAIGoogleGemini = class extends AxBaseAI {
         embedModel: _config.embedModel
       },
       options,
-      supportFor:
+      supportFor: (model) => {
+        const modelInf = axModelInfoGoogleGemini.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false,
+          functionCot: false
+        };
+      },
       models
     });
   }
@@ -3977,6 +4100,93 @@ var AxAI = class {
   }
 };
 
+// ai/x-grok/types.ts
+var AxAIGrokModel = /* @__PURE__ */ ((AxAIGrokModel2) => {
+  AxAIGrokModel2["Grok3"] = "grok-3";
+  AxAIGrokModel2["Grok3Mini"] = "grok-3-mini";
+  AxAIGrokModel2["Grok3Fast"] = "grok-3-fast";
+  AxAIGrokModel2["Grok3MiniFast"] = "grok-3-mini-fast";
+  return AxAIGrokModel2;
+})(AxAIGrokModel || {});
+var AxAIGrokEmbedModels = /* @__PURE__ */ ((AxAIGrokEmbedModels3) => {
+  AxAIGrokEmbedModels3["GrokEmbedSmall"] = "grok-embed-small";
+  return AxAIGrokEmbedModels3;
+})(AxAIGrokEmbedModels || {});
+
+// ai/x-grok/info.ts
+var axModelInfoGrok = [
+  {
+    name: "grok-3" /* Grok3 */,
+    currency: "USD",
+    promptTokenCostPer1M: 3,
+    completionTokenCostPer1M: 15
+  },
+  {
+    name: "grok-3-mini" /* Grok3Mini */,
+    currency: "USD",
+    promptTokenCostPer1M: 0.3,
+    completionTokenCostPer1M: 0.5,
+    hasThinkingBudget: true
+  },
+  {
+    name: "grok-3-fast" /* Grok3Fast */,
+    currency: "USD",
+    promptTokenCostPer1M: 5,
+    completionTokenCostPer1M: 25
+  },
+  {
+    name: "grok-3-mini-fast" /* Grok3MiniFast */,
+    currency: "USD",
+    promptTokenCostPer1M: 0.6,
+    completionTokenCostPer1M: 4,
+    hasThinkingBudget: true
+  }
+];
+
+// ai/x-grok/api.ts
+var axAIGrokDefaultConfig = () => structuredClone({
+  model: "grok-3-mini" /* Grok3Mini */,
+  ...axBaseAIDefaultConfig()
+});
+var axAIGrokBestConfig = () => structuredClone({
+  ...axAIGrokDefaultConfig(),
+  model: "grok-3" /* Grok3 */
+});
+var AxAIGrok = class extends AxAIOpenAIBase {
+  constructor({
+    apiKey,
+    config,
+    options,
+    models
+  }) {
+    if (!apiKey || apiKey === "") {
+      throw new Error("Grok API key not set");
+    }
+    const _config = {
+      ...axAIGrokDefaultConfig(),
+      ...config
+    };
+    super({
+      apiKey,
+      config: _config,
+      options,
+      apiURL: "https://api.x.ai/v1",
+      modelInfo: axModelInfoGrok,
+      models,
+      supportFor: (model) => {
+        const modelInf = axModelInfoGrok.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }
+    });
+    super.setName("Grok");
+  }
+};
+
 // dsp/generate.ts
 import { ReadableStream as ReadableStream3 } from "node:stream/web";
 import { SpanKind as SpanKind2 } from "@opentelemetry/api";
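The new Grok provider is a thin wrapper over the OpenAI-compatible base pointed at `https://api.x.ai/v1`, with `grok-3-mini` as the default model and thinking-budget support flagged on the mini variants. A minimal usage sketch; the constructor fields and enum members come from this diff, while the `XAI_APIKEY` env var and prompt are illustrative:

```typescript
import { AxAIGrok, AxAIGrokModel } from '@ax-llm/ax'

const grok = new AxAIGrok({
  apiKey: process.env.XAI_APIKEY!,
  config: { model: AxAIGrokModel.Grok3 } // defaults to grok-3-mini when omitted
})

// grok-3-mini and grok-3-mini-fast report hasThinkingBudget: true, so a
// thinkingTokenBudget option would be accepted for those models.
await grok.chat({ chatPrompt: [{ role: 'user', content: 'Hello from Ax' }] })
```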
@@ -11611,6 +11821,9 @@ export {
   AxAIGoogleGeminiModel,
   AxAIGoogleGeminiSafetyCategory,
   AxAIGoogleGeminiSafetyThreshold,
+  AxAIGrok,
+  AxAIGrokEmbedModels,
+  AxAIGrokModel,
   AxAIGroq,
   AxAIGroqModel,
   AxAIHuggingFace,
@@ -11691,6 +11904,8 @@ export {
   axAIDeepSeekDefaultConfig,
   axAIGoogleGeminiDefaultConfig,
   axAIGoogleGeminiDefaultCreativeConfig,
+  axAIGrokBestConfig,
+  axAIGrokDefaultConfig,
   axAIHuggingFaceCreativeConfig,
   axAIHuggingFaceDefaultConfig,
   axAIMistralBestConfig,
@@ -11712,6 +11927,7 @@ export {
   axModelInfoCohere,
   axModelInfoDeepSeek,
   axModelInfoGoogleGemini,
+  axModelInfoGrok,
   axModelInfoGroq,
   axModelInfoHuggingFace,
   axModelInfoMistral,