@ax-llm/ax 11.0.44 → 11.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +257 -35
- package/index.cjs.map +1 -1
- package/index.d.cts +65 -9
- package/index.d.ts +65 -9
- package/index.js +245 -29
- package/index.js.map +1 -1
- package/package.json +1 -1
package/index.cjs
CHANGED
```diff
@@ -46,6 +46,9 @@ __export(index_exports, {
   AxAIGoogleGeminiModel: () => AxAIGoogleGeminiModel,
   AxAIGoogleGeminiSafetyCategory: () => AxAIGoogleGeminiSafetyCategory,
   AxAIGoogleGeminiSafetyThreshold: () => AxAIGoogleGeminiSafetyThreshold,
+  AxAIGrok: () => AxAIGrok,
+  AxAIGrokEmbedModels: () => AxAIGrokEmbedModels,
+  AxAIGrokModel: () => AxAIGrokModel,
   AxAIGroq: () => AxAIGroq,
   AxAIGroqModel: () => AxAIGroqModel,
   AxAIHuggingFace: () => AxAIHuggingFace,
@@ -126,6 +129,8 @@ __export(index_exports, {
   axAIDeepSeekDefaultConfig: () => axAIDeepSeekDefaultConfig,
   axAIGoogleGeminiDefaultConfig: () => axAIGoogleGeminiDefaultConfig,
   axAIGoogleGeminiDefaultCreativeConfig: () => axAIGoogleGeminiDefaultCreativeConfig,
+  axAIGrokBestConfig: () => axAIGrokBestConfig,
+  axAIGrokDefaultConfig: () => axAIGrokDefaultConfig,
   axAIHuggingFaceCreativeConfig: () => axAIHuggingFaceCreativeConfig,
   axAIHuggingFaceDefaultConfig: () => axAIHuggingFaceDefaultConfig,
   axAIMistralBestConfig: () => axAIMistralBestConfig,
@@ -147,6 +152,7 @@ __export(index_exports, {
   axModelInfoCohere: () => axModelInfoCohere,
   axModelInfoDeepSeek: () => axModelInfoDeepSeek,
   axModelInfoGoogleGemini: () => axModelInfoGoogleGemini,
+  axModelInfoGrok: () => axModelInfoGrok,
   axModelInfoGroq: () => axModelInfoGroq,
   axModelInfoHuggingFace: () => axModelInfoHuggingFace,
   axModelInfoMistral: () => axModelInfoMistral,
@@ -1068,15 +1074,16 @@ var AxBaseAI = class {
       ...this.aiImpl.getModelConfig(),
       ...req.modelConfig
     };
+    if (options?.thinkingTokenBudget && !this.getFeatures(model).hasThinkingBudget) {
+      throw new Error(
+        `Model ${model} does not support thinkingTokenBudget.`
+      );
+    }
     modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
     const canStream = this.getFeatures(model).streaming;
     if (!canStream) {
      modelConfig.stream = false;
     }
-    const canSetThinkingTokenBudget = this.getFeatures(model).thinkingTokenBudget;
-    if (!canSetThinkingTokenBudget && options?.thinkingTokenBudget) {
-      throw new Error("Thinking token budget is not supported for this model");
-    }
     if (this.tracer) {
       return await this.tracer?.startActiveSpan(
         "AI Chat Request",
```
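The hunk above moves the thinking-budget check ahead of the stream handling and keys it off the per-model `hasThinkingBudget` feature flag instead of the old `thinkingTokenBudget` feature. A minimal caller-side sketch, assuming the `AxAI` factory and `chat(request, options)` signature are unchanged from 11.0.44; the budget levels `'minimal' | 'low' | 'medium' | 'high'` appear later in this diff:

```ts
import { AxAI, AxAIOpenAIModel } from '@ax-llm/ax';

const ai = new AxAI({
  name: 'openai',
  apiKey: process.env.OPENAI_APIKEY as string,
});

try {
  // gpt-4 is not flagged hasThinkingBudget in axModelInfoOpenAI, so the new
  // guard in AxBaseAI rejects the option before the request is built.
  await ai.chat(
    {
      model: AxAIOpenAIModel.GPT4,
      chatPrompt: [{ role: 'user', content: 'hello' }],
    },
    { thinkingTokenBudget: 'low' }
  );
} catch (err) {
  console.error((err as Error).message); // "Model gpt-4 does not support thinkingTokenBudget."
}
```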
```diff
@@ -1174,6 +1181,13 @@ var AxBaseAI = class {
     const wrappedRespFn = (state) => (resp) => {
       const res2 = respFn(resp, state);
       res2.sessionId = options?.sessionId;
+      if (options?.hideThought) {
+        res2.results.forEach((result) => {
+          if (result.thought) {
+            result.thought = void 0;
+          }
+        });
+      }
       if (!res2.modelUsage) {
         res2.modelUsage = {
           ai: this.name,
@@ -1211,6 +1225,13 @@ var AxBaseAI = class {
     }
     const res = this.aiImpl.createChatResp(rv);
     res.sessionId = options?.sessionId;
+    if (options?.hideThought) {
+      res.results.forEach((result) => {
+        if (result.thought) {
+          result.thought = void 0;
+        }
+      });
+    }
     if (!res.modelUsage) {
       const tokenUsage = this.aiImpl.getTokenUsage();
       if (tokenUsage) {
```
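These two hunks apply the new `hideThought` chat option on both the streaming and non-streaming paths, clearing `result.thought` before the response is handed back. A short sketch reusing the `ai` instance from the previous example; the cast assumes `stream: false` yields a plain `AxChatResponse`:

```ts
import type { AxChatResponse } from '@ax-llm/ax';

const res = (await ai.chat(
  { chatPrompt: [{ role: 'user', content: 'Summarize this change.' }] },
  { stream: false, hideThought: true }
)) as AxChatResponse;

for (const result of res.results) {
  console.log(result.thought); // always undefined when hideThought is set
  console.log(result.content);
}
```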
```diff
@@ -1998,25 +2019,29 @@ var axModelInfoOpenAI = [
     name: "o1" /* O1 */,
     currency: "usd",
     promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 60
+    completionTokenCostPer1M: 60,
+    hasThinkingBudget: true
   },
   {
     name: "o1-mini" /* O1Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 14.4
+    completionTokenCostPer1M: 14.4,
+    hasThinkingBudget: true
   },
   {
     name: "o3-mini" /* O3Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true
   },
   {
     name: "o4-mini" /* O4Mini */,
     currency: "usd",
     promptTokenCostPer1M: 1.1,
-    completionTokenCostPer1M: 4.4
+    completionTokenCostPer1M: 4.4,
+    hasThinkingBudget: true
   },
   {
     name: "gpt-4" /* GPT4 */,
@@ -2106,9 +2131,10 @@ var axAIOpenAIFastConfig = () => ({
   model: "gpt-4.1-mini" /* GPT41Mini */
 });
 var AxAIOpenAIImpl = class {
-  constructor(config, streamingUsage) {
+  constructor(config, streamingUsage, chatReqUpdater) {
     this.config = config;
     this.streamingUsage = streamingUsage;
+    this.chatReqUpdater = chatReqUpdater;
   }
   tokensUsed;
   getTokenUsage() {
@@ -2128,7 +2154,7 @@ var AxAIOpenAIImpl = class {
       stream: config.stream
     };
   }
-  createChatReq(req,
+  createChatReq(req, config) {
     const model = req.model;
     if (!req.chatPrompt || req.chatPrompt.length === 0) {
       throw new Error("Chat prompt is empty");
@@ -2148,12 +2174,11 @@ var AxAIOpenAIImpl = class {
     const messages = createMessages2(req);
     const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
     const stream = req.modelConfig?.stream ?? this.config.stream;
-    const reasoningEffort = isReasoningModel(model) ? this.config.reasoningEffort : void 0;
     const store = this.config.store;
-
+    let reqValue = {
       model,
       messages,
-      response_format: this.config?.responseFormat ? { type: this.config
+      response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
       tools,
       tool_choice: toolsChoice,
       max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 500,
@@ -2165,9 +2190,55 @@ var AxAIOpenAIImpl = class {
       logit_bias: this.config.logitBias,
       ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
       ...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
-      ...reasoningEffort ? { reasoning_effort: reasoningEffort } : {},
       ...store ? { store } : {}
     };
+    if (this.config.reasoningEffort) {
+      reqValue.reasoning_effort = this.config.reasoningEffort;
+    }
+    if (this.config.webSearchOptions) {
+      reqValue.web_search_options = {
+        ...this.config.webSearchOptions.searchContextSize && {
+          search_context_size: this.config.webSearchOptions.searchContextSize
+        },
+        ...this.config.webSearchOptions.userLocation && {
+          user_location: {
+            approximate: {
+              type: "approximate",
+              ...this.config.webSearchOptions.userLocation.approximate.city && {
+                city: this.config.webSearchOptions.userLocation.approximate.city
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.country && {
+                country: this.config.webSearchOptions.userLocation.approximate.country
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.region && {
+                region: this.config.webSearchOptions.userLocation.approximate.region
+              },
+              ...this.config.webSearchOptions.userLocation.approximate.timezone && {
+                timezone: this.config.webSearchOptions.userLocation.approximate.timezone
+              }
+            }
+          }
+        }
+      };
+    }
+    if (config.thinkingTokenBudget) {
+      switch (config.thinkingTokenBudget) {
+        case "minimal":
+          reqValue.reasoning_effort = "low";
+          break;
+        case "low":
+          reqValue.reasoning_effort = "medium";
+          break;
+        case "medium":
+          reqValue.reasoning_effort = "high";
+          break;
+        case "high":
+          reqValue.reasoning_effort = "high";
+      }
+    }
+    if (this.chatReqUpdater) {
+      reqValue = this.chatReqUpdater(reqValue);
+    }
     return [apiConfig, reqValue];
   }
   createEmbedReq(req) {
```
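In the OpenAI request builder, the provider-neutral `thinkingTokenBudget` levels are now mapped onto `reasoning_effort` (minimal → low, low → medium, medium → high, high → high), `webSearchOptions` is forwarded as `web_search_options`, and an optional `chatReqUpdater` hook gets the final say on the request. The base-class constructor later in this diff threads `chatReqUpdater` through; the sketch below only illustrates the hook shape, since the diff does not show how it is typed or re-exported:

```ts
// Hypothetical request type: the diff only shows that AxAIOpenAIImpl calls
// this.chatReqUpdater(reqValue) after building the request.
type OpenAIChatRequest = Record<string, unknown> & {
  reasoning_effort?: 'low' | 'medium' | 'high';
};

const chatReqUpdater = (req: OpenAIChatRequest): OpenAIChatRequest => ({
  ...req,
  // Override whatever reasoning_effort the thinkingTokenBudget mapping chose.
  reasoning_effort: 'high',
});
```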
```diff
@@ -2210,6 +2281,7 @@ var AxAIOpenAIImpl = class {
       return {
         id: `${choice.index}`,
         content: choice.message.content,
+        thought: choice.message.reasoning_content,
         functionCalls,
         finishReason
       };
@@ -2232,7 +2304,12 @@ var AxAIOpenAIImpl = class {
     }
     const results = choices.map(
       ({
-        delta: {
+        delta: {
+          content,
+          role,
+          tool_calls: toolCalls,
+          reasoning_content: thought
+        },
         finish_reason: oaiFinishReason
       }) => {
         const finishReason = mapFinishReason2(oaiFinishReason);
@@ -2253,6 +2330,7 @@ var AxAIOpenAIImpl = class {
         return {
           content,
           role,
+          thought,
           functionCalls,
           finishReason,
           id
```
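These hunks surface the OpenAI-compatible `reasoning_content` field as `thought` on both full responses and streaming deltas. A sketch reusing the earlier `ai` instance; only backends that actually return `reasoning_content` (DeepSeek-style APIs, or Grok through the OpenAI-compatible endpoint added below) will populate it:

```ts
import type { AxChatResponse } from '@ax-llm/ax';

const resp = (await ai.chat(
  { chatPrompt: [{ role: 'user', content: 'Think step by step: 17 * 23 = ?' }] },
  { stream: false }
)) as AxChatResponse;

const [first] = resp.results;
if (first?.thought) {
  // Present only when the backend returned reasoning_content and
  // hideThought was not set.
  console.log('reasoning:', first.thought);
}
console.log('answer:', first?.content); // 17 * 23 = 391
```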
```diff
@@ -2350,14 +2428,17 @@ var AxAIOpenAIBase = class extends AxBaseAI {
     options,
     apiURL,
     modelInfo,
-    models
+    models,
+    chatReqUpdater,
+    supportFor
   }) {
     if (!apiKey || apiKey === "") {
       throw new Error("OpenAI API key not set");
     }
     const aiImpl = new AxAIOpenAIImpl(
       config,
-      options?.streamingUsage ?? true
+      options?.streamingUsage ?? true,
+      chatReqUpdater
     );
     super(aiImpl, {
       name: "OpenAI",
@@ -2369,16 +2450,19 @@ var AxAIOpenAIBase = class extends AxBaseAI {
         embedModel: config.embedModel
       },
       options,
-      supportFor: () => {
-
-
+      supportFor: supportFor ?? ((model) => {
+        const modelInf = modelInfo.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }),
       models
     });
   }
 };
-var isReasoningModel = (model) => ["o1-mini" /* O1Mini */, "o1" /* O1 */, "o3-mini" /* O3Mini */].includes(
-  model
-);
 var AxAIOpenAI = class extends AxAIOpenAIBase {
   constructor({
     apiKey,
@@ -2389,6 +2473,15 @@ var AxAIOpenAI = class extends AxAIOpenAIBase {
     if (!apiKey || apiKey === "") {
       throw new Error("OpenAI API key not set");
     }
+    const supportForFn = (model) => {
+      const modelInf = axModelInfoOpenAI.find((m) => m.name === model);
+      return {
+        functions: true,
+        streaming: true,
+        hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+        hasShowThoughts: modelInf?.hasShowThoughts ?? false
+      };
+    };
     super({
       apiKey,
       config: {
@@ -2397,7 +2490,8 @@ var AxAIOpenAI = class extends AxAIOpenAIBase {
       },
       options,
       modelInfo: axModelInfoOpenAI,
-      models
+      models,
+      supportFor: supportForFn
     });
     super.setName("OpenAI");
   }
@@ -2436,7 +2530,16 @@ var AxAIAzureOpenAI = class extends AxAIOpenAIBase {
       config: _config,
       options,
       models,
-      modelInfo: axModelInfoOpenAI
+      modelInfo: axModelInfoOpenAI,
+      supportFor: (model) => {
+        const modelInf = axModelInfoOpenAI.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }
     });
     const host = resourceName.includes("://") ? resourceName : `https://${resourceName}.openai.azure.com/`;
     super.setName("Azure OpenAI");
```
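`supportFor` is now a constructor parameter of `AxAIOpenAIBase`, and the OpenAI and Azure OpenAI wrappers derive `hasThinkingBudget` / `hasShowThoughts` from `axModelInfoOpenAI` instead of hard-coding the feature set. A sketch of the visible effect, reusing the `ai` instance and enum import from the first example and assuming `getFeatures()` is still exposed on the service wrapper as in earlier releases:

```ts
const features = ai.getFeatures(AxAIOpenAIModel.O4Mini);
console.log(features.hasThinkingBudget); // true: o4-mini gains the flag in this diff
console.log(features.hasShowThoughts);   // false: not set for any OpenAI model here
```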
```diff
@@ -2895,14 +2998,18 @@ var axModelInfoGoogleGemini = [
     currency: "usd",
     characterIsToken: false,
     promptTokenCostPer1M: 2.5,
-    completionTokenCostPer1M: 15
+    completionTokenCostPer1M: 15,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
   },
   {
     name: "gemini-2.5-flash-preview-04-17" /* Gemini25Flash */,
     currency: "usd",
     characterIsToken: false,
     promptTokenCostPer1M: 15,
-    completionTokenCostPer1M: 3.5
+    completionTokenCostPer1M: 3.5,
+    hasThinkingBudget: true,
+    hasShowThoughts: true
   },
   {
     name: "gemini-2.0-flash" /* Gemini20Flash */,
@@ -3163,7 +3270,20 @@ var AxAIGoogleGeminiImpl = class {
       thinkingConfig.thinkingBudget = this.config.thinking.thinkingTokenBudget;
     }
     if (config.thinkingTokenBudget) {
-
+      switch (config.thinkingTokenBudget) {
+        case "minimal":
+          thinkingConfig.thinkingBudget = 0;
+          break;
+        case "low":
+          thinkingConfig.thinkingBudget = 1024;
+          break;
+        case "medium":
+          thinkingConfig.thinkingBudget = 4096;
+          break;
+        case "high":
+          thinkingConfig.thinkingBudget = 8192;
+          break;
+      }
     }
     const generationConfig = {
       maxOutputTokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
```
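For Gemini, the same `thinkingTokenBudget` levels are translated into concrete `thinkingConfig.thinkingBudget` values: minimal → 0, low → 1024, medium → 4096, high → 8192. A separate sketch, assuming the `google-gemini` entry of the `AxAI` factory and a `GEMINI_API_KEY` environment variable (the variable name is an assumption):

```ts
import { AxAI, AxAIGoogleGeminiModel } from '@ax-llm/ax';

const gemini = new AxAI({
  name: 'google-gemini',
  apiKey: process.env.GEMINI_API_KEY as string,
});

// 'medium' is rewritten by the hunk above into thinkingConfig.thinkingBudget = 4096.
await gemini.chat(
  {
    model: AxAIGoogleGeminiModel.Gemini25Flash,
    chatPrompt: [{ role: 'user', content: 'Plan a three-step refactor.' }],
  },
  { stream: false, thinkingTokenBudget: 'medium' }
);
```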
```diff
@@ -3364,7 +3484,16 @@ var AxAIGoogleGemini = class extends AxBaseAI {
         embedModel: _config.embedModel
       },
       options,
-      supportFor:
+      supportFor: (model) => {
+        const modelInf = axModelInfoGoogleGemini.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false,
+          functionCot: false
+        };
+      },
       models
     });
   }
@@ -4129,9 +4258,96 @@ var AxAI = class {
   }
 };
 
+// ai/x-grok/types.ts
+var AxAIGrokModel = /* @__PURE__ */ ((AxAIGrokModel2) => {
+  AxAIGrokModel2["Grok3"] = "grok-3";
+  AxAIGrokModel2["Grok3Mini"] = "grok-3-mini";
+  AxAIGrokModel2["Grok3Fast"] = "grok-3-fast";
+  AxAIGrokModel2["Grok3MiniFast"] = "grok-3-mini-fast";
+  return AxAIGrokModel2;
+})(AxAIGrokModel || {});
+var AxAIGrokEmbedModels = /* @__PURE__ */ ((AxAIGrokEmbedModels3) => {
+  AxAIGrokEmbedModels3["GrokEmbedSmall"] = "grok-embed-small";
+  return AxAIGrokEmbedModels3;
+})(AxAIGrokEmbedModels || {});
+
+// ai/x-grok/info.ts
+var axModelInfoGrok = [
+  {
+    name: "grok-3" /* Grok3 */,
+    currency: "USD",
+    promptTokenCostPer1M: 3,
+    completionTokenCostPer1M: 15
+  },
+  {
+    name: "grok-3-mini" /* Grok3Mini */,
+    currency: "USD",
+    promptTokenCostPer1M: 0.3,
+    completionTokenCostPer1M: 0.5,
+    hasThinkingBudget: true
+  },
+  {
+    name: "grok-3-fast" /* Grok3Fast */,
+    currency: "USD",
+    promptTokenCostPer1M: 5,
+    completionTokenCostPer1M: 25
+  },
+  {
+    name: "grok-3-mini-fast" /* Grok3MiniFast */,
+    currency: "USD",
+    promptTokenCostPer1M: 0.6,
+    completionTokenCostPer1M: 4,
+    hasThinkingBudget: true
+  }
+];
+
+// ai/x-grok/api.ts
+var axAIGrokDefaultConfig = () => structuredClone({
+  model: "grok-3-mini" /* Grok3Mini */,
+  ...axBaseAIDefaultConfig()
+});
+var axAIGrokBestConfig = () => structuredClone({
+  ...axAIGrokDefaultConfig(),
+  model: "grok-3" /* Grok3 */
+});
+var AxAIGrok = class extends AxAIOpenAIBase {
+  constructor({
+    apiKey,
+    config,
+    options,
+    models
+  }) {
+    if (!apiKey || apiKey === "") {
+      throw new Error("Grok API key not set");
+    }
+    const _config = {
+      ...axAIGrokDefaultConfig(),
+      ...config
+    };
+    super({
+      apiKey,
+      config: _config,
+      options,
+      apiURL: "https://api.x.ai/v1",
+      modelInfo: axModelInfoGrok,
+      models,
+      supportFor: (model) => {
+        const modelInf = axModelInfoGrok.find((m) => m.name === model);
+        return {
+          functions: true,
+          streaming: true,
+          hasThinkingBudget: modelInf?.hasThinkingBudget ?? false,
+          hasShowThoughts: modelInf?.hasShowThoughts ?? false
+        };
+      }
+    });
+    super.setName("Grok");
+  }
+};
+
 // dsp/generate.ts
 var import_web5 = require("stream/web");
-var
+var import_api22 = require("@opentelemetry/api");
 
 // ai/util.ts
 function mergeFunctionCalls(functionCalls, functionCallDeltas) {
```
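The bulk of this hunk is the new xAI Grok provider: model and embed-model enums, an `axModelInfoGrok` pricing table, default/best configs, and an `AxAIGrok` class that reuses the OpenAI-compatible request path against `https://api.x.ai/v1`. A construction sketch; the `XAI_API_KEY` variable name and the partial-`config` shape are assumptions not shown in this diff:

```ts
import { AxAIGrok, AxAIGrokModel } from '@ax-llm/ax';
import type { AxChatResponse } from '@ax-llm/ax';

const grok = new AxAIGrok({
  apiKey: process.env.XAI_API_KEY as string, // env var name is an assumption
  config: { model: AxAIGrokModel.Grok3Mini },
});

const out = (await grok.chat(
  { chatPrompt: [{ role: 'user', content: 'One-line summary of the xAI API?' }] },
  // grok-3-mini carries hasThinkingBudget: true in axModelInfoGrok, so the
  // budget option is accepted and mapped onto reasoning_effort upstream.
  { stream: false, thinkingTokenBudget: 'low' }
)) as AxChatResponse;

console.log(out.results[0]?.content);
```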
```diff
@@ -6986,7 +7202,7 @@ Content: ${result.content}`
     const traceLabel = options.traceLabel ?? this.options?.traceLabel;
     const spanName = traceLabel ? `${traceLabel} (AxGen)` : "AxGen";
     const span = tracer.startSpan(spanName, {
-      kind:
+      kind: import_api22.SpanKind.SERVER,
       attributes
     });
     try {
@@ -7861,7 +8077,7 @@ var randomSample = (array, n) => {
 };
 
 // db/base.ts
-var
+var import_api23 = require("@opentelemetry/api");
 var AxDBBase = class {
   name;
   fetch;
@@ -7888,7 +8104,7 @@ var AxDBBase = class {
     return await this.tracer?.startActiveSpan(
       "DB Upsert Request",
       {
-        kind:
+        kind: import_api23.SpanKind.SERVER,
         attributes: {
           [axSpanAttributes.DB_SYSTEM]: this.name,
           [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -7922,7 +8138,7 @@ var AxDBBase = class {
     return await this.tracer?.startActiveSpan(
       "DB Batch Upsert Request",
       {
-        kind:
+        kind: import_api23.SpanKind.SERVER,
         attributes: {
           [axSpanAttributes.DB_SYSTEM]: this.name,
           [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -7950,7 +8166,7 @@ var AxDBBase = class {
     return await this.tracer?.startActiveSpan(
       "DB Query Request",
       {
-        kind:
+        kind: import_api23.SpanKind.SERVER,
         attributes: {
           [axSpanAttributes.DB_SYSTEM]: this.name,
           [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -11764,6 +11980,9 @@ var AxRAG = class extends AxChainOfThought {
   AxAIGoogleGeminiModel,
   AxAIGoogleGeminiSafetyCategory,
   AxAIGoogleGeminiSafetyThreshold,
+  AxAIGrok,
+  AxAIGrokEmbedModels,
+  AxAIGrokModel,
   AxAIGroq,
   AxAIGroqModel,
   AxAIHuggingFace,
@@ -11844,6 +12063,8 @@ var AxRAG = class extends AxChainOfThought {
   axAIDeepSeekDefaultConfig,
   axAIGoogleGeminiDefaultConfig,
   axAIGoogleGeminiDefaultCreativeConfig,
+  axAIGrokBestConfig,
+  axAIGrokDefaultConfig,
   axAIHuggingFaceCreativeConfig,
   axAIHuggingFaceDefaultConfig,
   axAIMistralBestConfig,
@@ -11865,6 +12086,7 @@ var AxRAG = class extends AxChainOfThought {
   axModelInfoCohere,
   axModelInfoDeepSeek,
   axModelInfoGoogleGemini,
+  axModelInfoGrok,
   axModelInfoGroq,
   axModelInfoHuggingFace,
   axModelInfoMistral,
```