@ax-llm/ax 11.0.42 → 11.0.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.cjs CHANGED
@@ -46,6 +46,9 @@ __export(index_exports, {
  AxAIGoogleGeminiModel: () => AxAIGoogleGeminiModel,
  AxAIGoogleGeminiSafetyCategory: () => AxAIGoogleGeminiSafetyCategory,
  AxAIGoogleGeminiSafetyThreshold: () => AxAIGoogleGeminiSafetyThreshold,
+ AxAIGrok: () => AxAIGrok,
+ AxAIGrokEmbedModels: () => AxAIGrokEmbedModels,
+ AxAIGrokModel: () => AxAIGrokModel,
  AxAIGroq: () => AxAIGroq,
  AxAIGroqModel: () => AxAIGroqModel,
  AxAIHuggingFace: () => AxAIHuggingFace,
@@ -126,6 +129,8 @@ __export(index_exports, {
  axAIDeepSeekDefaultConfig: () => axAIDeepSeekDefaultConfig,
  axAIGoogleGeminiDefaultConfig: () => axAIGoogleGeminiDefaultConfig,
  axAIGoogleGeminiDefaultCreativeConfig: () => axAIGoogleGeminiDefaultCreativeConfig,
+ axAIGrokBestConfig: () => axAIGrokBestConfig,
+ axAIGrokDefaultConfig: () => axAIGrokDefaultConfig,
  axAIHuggingFaceCreativeConfig: () => axAIHuggingFaceCreativeConfig,
  axAIHuggingFaceDefaultConfig: () => axAIHuggingFaceDefaultConfig,
  axAIMistralBestConfig: () => axAIMistralBestConfig,
@@ -147,6 +152,7 @@ __export(index_exports, {
  axModelInfoCohere: () => axModelInfoCohere,
  axModelInfoDeepSeek: () => axModelInfoDeepSeek,
  axModelInfoGoogleGemini: () => axModelInfoGoogleGemini,
+ axModelInfoGrok: () => axModelInfoGrok,
  axModelInfoGroq: () => axModelInfoGroq,
  axModelInfoHuggingFace: () => axModelInfoHuggingFace,
  axModelInfoMistral: () => axModelInfoMistral,
@@ -165,6 +171,7 @@ var import_api2 = require("@opentelemetry/api");
  var axSpanAttributes = {
  // LLM
  LLM_SYSTEM: "gen_ai.system",
+ LLM_OPERATION_NAME: "gen_ai.operation.name",
  LLM_REQUEST_MODEL: "gen_ai.request.model",
  LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
  LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
@@ -174,8 +181,10 @@ var axSpanAttributes = {
  LLM_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences",
  LLM_REQUEST_LLM_IS_STREAMING: "gen_ai.request.llm_is_streaming",
  LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
- LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
- LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
+ LLM_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
+ LLM_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
+ LLM_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens",
+ LLM_USAGE_THOUGHTS_TOKENS: "gen_ai.usage.thoughts_tokens",
  // Vector DB
  DB_SYSTEM: "db.system",
  DB_TABLE: "db.table",
@@ -198,7 +207,13 @@ var axSpanAttributes = {
  DB_QUERY_RESULT_DOCUMENT: "db.query.result.document"
  };
  var axSpanEvents = {
- LLM_PROMPT: "gen_ai.prompt"
+ GEN_AI_USER_MESSAGE: "gen_ai.user.message",
+ GEN_AI_SYSTEM_MESSAGE: "gen_ai.system.message",
+ GEN_AI_ASSISTANT_MESSAGE: "gen_ai.assistant.message",
+ GEN_AI_TOOL_MESSAGE: "gen_ai.tool.message",
+ // For tool messages in request & response tool calls
+ GEN_AI_CHOICE: "gen_ai.choice",
+ GEN_AI_USAGE: "gen_ai.usage"
  };
  var AxLLMRequestTypeValues = /* @__PURE__ */ ((AxLLMRequestTypeValues2) => {
  AxLLMRequestTypeValues2["COMPLETION"] = "completion";
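Note: the span attributes and events above now follow the OpenTelemetry GenAI semantic conventions (gen_ai.*). A minimal sketch of the usage event a chat span now carries, using the attribute names from this diff (the token counts are hypothetical):

  span.addEvent("gen_ai.usage", {
    "gen_ai.usage.input_tokens": 120,
    "gen_ai.usage.output_tokens": 48,
    "gen_ai.usage.total_tokens": 168,
  });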
@@ -636,9 +651,6 @@ var apiCall = async (api, json) => {
  } finally {
  clearTimeout(timeoutId);
  reader.releaseLock();
- if (api.span?.isRecording()) {
- api.span.end();
- }
  }
  }
  read();
@@ -686,9 +698,6 @@ var apiCall = async (api, json) => {
  if (timeoutId !== void 0) {
  clearTimeout(timeoutId);
  }
- if (api.span?.isRecording()) {
- api.span.end();
- }
  }
  }
  };
@@ -902,6 +911,7 @@ var AxBaseAI = class {
  fetch;
  tracer;
  timeout;
+ excludeContentFromTrace;
  models;
  modelInfo;
  modelUsage;
@@ -962,13 +972,16 @@ var AxBaseAI = class {
  this.fetch = options.fetch;
  this.timeout = options.timeout;
  this.tracer = options.tracer;
+ this.excludeContentFromTrace = options.excludeContentFromTrace;
  }
  getOptions() {
  return {
  debug: this.debug,
  rateLimiter: this.rt,
  fetch: this.fetch,
- tracer: this.tracer
+ tracer: this.tracer,
+ timeout: this.timeout,
+ excludeContentFromTrace: this.excludeContentFromTrace
  };
  }
  getModelList() {
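Note: excludeContentFromTrace keeps prompt and completion text out of span events while token usage is still recorded. A minimal sketch, assuming a provider that forwards its options object to AxBaseAI as shown above (tracer is any OpenTelemetry Tracer):

  const ai = new AxAIOpenAI({
    apiKey: process.env.OPENAI_APIKEY,
    options: { tracer, excludeContentFromTrace: true },
  });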
@@ -1068,11 +1081,12 @@ var AxBaseAI = class {
  }
  if (this.tracer) {
  return await this.tracer?.startActiveSpan(
- "Chat Request",
+ "AI Chat Request",
  {
  kind: import_api2.SpanKind.SERVER,
  attributes: {
  [axSpanAttributes.LLM_SYSTEM]: this.name,
+ [axSpanAttributes.LLM_OPERATION_NAME]: "chat",
  [axSpanAttributes.LLM_REQUEST_MODEL]: model,
  [axSpanAttributes.LLM_REQUEST_MAX_TOKENS]: modelConfig.maxTokens,
  [axSpanAttributes.LLM_REQUEST_TEMPERATURE]: modelConfig.temperature,
@@ -1082,17 +1096,10 @@ var AxBaseAI = class {
  [axSpanAttributes.LLM_REQUEST_PRESENCE_PENALTY]: modelConfig.presencePenalty,
  [axSpanAttributes.LLM_REQUEST_STOP_SEQUENCES]: modelConfig.stopSequences?.join(", "),
  [axSpanAttributes.LLM_REQUEST_LLM_IS_STREAMING]: modelConfig.stream
- // [AxSpanAttributes.LLM_PROMPTS]: _req.chatPrompt
- // ?.map((v) => v.content)
- // .join('\n')
  }
  },
  async (span) => {
- try {
- return await this._chat2(model, modelConfig, req, options, span);
- } finally {
- span.end();
- }
+ return await this._chat2(model, modelConfig, req, options, span);
  }
  );
  }
@@ -1138,6 +1145,9 @@ var AxBaseAI = class {
  req,
  options
  );
+ if (span?.isRecording()) {
+ setChatRequestEvents(chatReq, span, this.excludeContentFromTrace);
+ }
  const res2 = await apiCall(
  {
  name: apiConfig.name,
@@ -1175,7 +1185,7 @@ var AxBaseAI = class {
  }
  this.modelUsage = res2.modelUsage;
  if (span?.isRecording()) {
- setResponseAttr(res2, span);
+ setChatResponseEvents(res2, span, this.excludeContentFromTrace);
  }
  if (debug) {
  logResponse(res2);
@@ -1186,6 +1196,9 @@ var AxBaseAI = class {
  if (debug) {
  process.stdout.write("\n");
  }
+ if (span?.isRecording()) {
+ span.end();
+ }
  };
  const st = rv.pipeThrough(
  new RespTransformStream(
@@ -1200,16 +1213,26 @@ var AxBaseAI = class {
  }
  const res = this.aiImpl.createChatResp(rv);
  res.sessionId = options?.sessionId;
+ if (!res.modelUsage) {
+ const tokenUsage = this.aiImpl.getTokenUsage();
+ if (tokenUsage) {
+ res.modelUsage = {
+ ai: this.name,
+ model,
+ tokens: tokenUsage
+ };
+ }
+ }
  if (res.modelUsage) {
  this.modelUsage = res.modelUsage;
  }
  if (span?.isRecording()) {
- setResponseAttr(res, span);
+ setChatResponseEvents(res, span, this.excludeContentFromTrace);
+ span.end();
  }
  if (debug) {
  logResponse(res);
  }
- span?.end();
  return res;
  }
  async embed(req, options) {
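Note: for streams that end without a usage payload, AxBaseAI now asks the provider implementation for its running token count via getTokenUsage() and synthesizes res.modelUsage from it. A sketch of that contract, mirroring the tokensUsed counter the impls keep (the shape is inferred from this diff):

  getTokenUsage() {
    // e.g. { promptTokens: 120, completionTokens: 48, totalTokens: 168, thoughtsTokens: 16 }
    return this.tokensUsed;
  }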
@@ -1233,11 +1256,12 @@ var AxBaseAI = class {
  }
  if (this.tracer) {
  await this.tracer?.startActiveSpan(
- "Embed Request",
+ "AI Embed Request",
  {
  kind: import_api2.SpanKind.SERVER,
  attributes: {
  [axSpanAttributes.LLM_SYSTEM]: this.name,
+ [axSpanAttributes.LLM_OPERATION_NAME]: "embeddings",
  [axSpanAttributes.LLM_REQUEST_MODEL]: embedModel
  }
  },
@@ -1292,8 +1316,12 @@ var AxBaseAI = class {
  };
  }
  this.embedModelUsage = res.modelUsage;
- if (span?.isRecording()) {
- setResponseAttr(res, span);
+ if (span?.isRecording() && res.modelUsage?.tokens) {
+ span.addEvent(axSpanEvents.GEN_AI_USAGE, {
+ [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]: res.modelUsage.tokens.promptTokens,
+ [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]: res.modelUsage.tokens.completionTokens ?? 0,
+ [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]: res.modelUsage.tokens.totalTokens
+ });
  }
  span?.end();
  return res;
@@ -1317,11 +1345,113 @@ var AxBaseAI = class {
  return item && "embedModel" in item ? item.embedModel : void 0;
  }
  };
- function setResponseAttr(res, span) {
- if (res.modelUsage) {
- span.setAttributes({
- [axSpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: res.modelUsage.tokens?.completionTokens ?? 0,
- [axSpanAttributes.LLM_USAGE_PROMPT_TOKENS]: res.modelUsage.tokens?.promptTokens
+ function setChatRequestEvents(req, span, excludeContentFromTrace) {
+ const userMessages = [];
+ if (req.chatPrompt && Array.isArray(req.chatPrompt) && req.chatPrompt.length > 0) {
+ for (const prompt of req.chatPrompt) {
+ switch (prompt.role) {
+ case "system":
+ if (prompt.content) {
+ const eventData2 = {};
+ if (!excludeContentFromTrace) {
+ eventData2.content = prompt.content;
+ }
+ span.addEvent(axSpanEvents.GEN_AI_SYSTEM_MESSAGE, eventData2);
+ }
+ break;
+ case "user":
+ if (typeof prompt.content === "string") {
+ userMessages.push(prompt.content);
+ } else if (Array.isArray(prompt.content)) {
+ for (const part of prompt.content) {
+ if (part.type === "text") {
+ userMessages.push(part.text);
+ }
+ }
+ }
+ break;
+ case "assistant":
+ const functionCalls = prompt.functionCalls?.map((call) => {
+ return {
+ id: call.id,
+ type: call.type,
+ function: call.function.name,
+ arguments: call.function.params
+ };
+ });
+ if (functionCalls && functionCalls.length > 0) {
+ const eventData2 = {
+ function_calls: JSON.stringify(functionCalls, null, 2)
+ };
+ if (!excludeContentFromTrace && prompt.content) {
+ eventData2.content = prompt.content;
+ }
+ span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData2);
+ } else if (prompt.content) {
+ const eventData2 = {};
+ if (!excludeContentFromTrace) {
+ eventData2.content = prompt.content;
+ }
+ span.addEvent(axSpanEvents.GEN_AI_ASSISTANT_MESSAGE, eventData2);
+ }
+ break;
+ case "function":
+ const eventData = {
+ id: prompt.functionId
+ };
+ if (!excludeContentFromTrace) {
+ eventData.content = prompt.result;
+ }
+ span.addEvent(axSpanEvents.GEN_AI_TOOL_MESSAGE, eventData);
+ break;
+ }
+ }
+ }
+ const userEventData = {};
+ if (!excludeContentFromTrace) {
+ userEventData.content = userMessages.join("\n");
+ }
+ span.addEvent(axSpanEvents.GEN_AI_USER_MESSAGE, userEventData);
+ }
+ function setChatResponseEvents(res, span, excludeContentFromTrace) {
+ if (res.modelUsage?.tokens) {
+ const thoughTokens = res.modelUsage.tokens.thoughtsTokens ? {
+ [axSpanAttributes.LLM_USAGE_THOUGHTS_TOKENS]: res.modelUsage.tokens.thoughtsTokens
+ } : {};
+ span.addEvent(axSpanEvents.GEN_AI_USAGE, {
+ [axSpanAttributes.LLM_USAGE_INPUT_TOKENS]: res.modelUsage.tokens.promptTokens,
+ [axSpanAttributes.LLM_USAGE_OUTPUT_TOKENS]: res.modelUsage.tokens.completionTokens ?? 0,
+ [axSpanAttributes.LLM_USAGE_TOTAL_TOKENS]: res.modelUsage.tokens.totalTokens,
+ ...thoughTokens
+ });
+ }
+ if (!res.results) {
+ return;
+ }
+ for (const [index, result] of res.results.entries()) {
+ const toolCalls = result.functionCalls?.map((call) => {
+ return {
+ id: call.id,
+ type: call.type,
+ function: call.function.name,
+ arguments: call.function.params
+ };
+ });
+ let message = {};
+ if (toolCalls && toolCalls.length > 0) {
+ if (!excludeContentFromTrace) {
+ message.content = result.content;
+ }
+ message.tool_calls = toolCalls;
+ } else {
+ if (!excludeContentFromTrace) {
+ message.content = result.content ?? "";
+ }
+ }
+ span.addEvent(axSpanEvents.GEN_AI_CHOICE, {
+ finish_reason: result.finishReason,
+ index,
+ message: JSON.stringify(message, null, 2)
  });
  }
  }
@@ -1978,9 +2108,10 @@ var axAIOpenAIFastConfig = () => ({
  model: "gpt-4.1-mini" /* GPT41Mini */
  });
  var AxAIOpenAIImpl = class {
- constructor(config, streamingUsage) {
+ constructor(config, streamingUsage, chatReqUpdater) {
  this.config = config;
  this.streamingUsage = streamingUsage;
+ this.chatReqUpdater = chatReqUpdater;
  }
  tokensUsed;
  getTokenUsage() {
@@ -2000,7 +2131,7 @@ var AxAIOpenAIImpl = class {
  stream: config.stream
  };
  }
- createChatReq(req, _config) {
+ createChatReq(req, config) {
  const model = req.model;
  if (!req.chatPrompt || req.chatPrompt.length === 0) {
  throw new Error("Chat prompt is empty");
@@ -2020,12 +2151,11 @@ var AxAIOpenAIImpl = class {
  const messages = createMessages2(req);
  const frequencyPenalty = req.modelConfig?.frequencyPenalty ?? this.config.frequencyPenalty;
  const stream = req.modelConfig?.stream ?? this.config.stream;
- const reasoningEffort = isReasoningModel(model) ? this.config.reasoningEffort : void 0;
  const store = this.config.store;
- const reqValue = {
+ let reqValue = {
  model,
  messages,
- response_format: this.config?.responseFormat ? { type: this.config?.responseFormat } : void 0,
+ response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
  tools,
  tool_choice: toolsChoice,
  max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 500,
@@ -2037,9 +2167,29 @@ var AxAIOpenAIImpl = class {
  logit_bias: this.config.logitBias,
  ...frequencyPenalty ? { frequency_penalty: frequencyPenalty } : {},
  ...stream && this.streamingUsage ? { stream: true, stream_options: { include_usage: true } } : {},
- ...reasoningEffort ? { reasoning_effort: reasoningEffort } : {},
  ...store ? { store } : {}
  };
+ if (this.config.reasoningEffort) {
+ reqValue.reasoning_effort = this.config.reasoningEffort;
+ }
+ if (config.thinkingTokenBudget) {
+ switch (config.thinkingTokenBudget) {
+ case "minimal":
+ reqValue.reasoning_effort = "low";
+ break;
+ case "low":
+ reqValue.reasoning_effort = "medium";
+ break;
+ case "medium":
+ reqValue.reasoning_effort = "high";
+ break;
+ case "high":
+ reqValue.reasoning_effort = "high";
+ }
+ }
+ if (this.chatReqUpdater) {
+ reqValue = this.chatReqUpdater(reqValue);
+ }
  return [apiConfig, reqValue];
  }
  createEmbedReq(req) {
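Note: for OpenAI-style backends the per-request thinkingTokenBudget is translated into reasoning_effort one level up (minimal -> low, low -> medium, medium -> high, high -> high). A minimal sketch, assuming the options object shown above is the second argument to chat():

  const res = await ai.chat(
    { chatPrompt: [{ role: "user", content: "Explain step by step." }] },
    { thinkingTokenBudget: "low" } // sent to the API as reasoning_effort: "medium"
  );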
@@ -2222,14 +2372,16 @@ var AxAIOpenAIBase = class extends AxBaseAI {
  options,
  apiURL,
  modelInfo,
- models
+ models,
+ chatReqUpdater
  }) {
  if (!apiKey || apiKey === "") {
  throw new Error("OpenAI API key not set");
  }
  const aiImpl = new AxAIOpenAIImpl(
  config,
- options?.streamingUsage ?? true
+ options?.streamingUsage ?? true,
+ chatReqUpdater
  );
  super(aiImpl, {
  name: "OpenAI",
@@ -2248,9 +2400,6 @@ var AxAIOpenAIBase = class extends AxBaseAI {
  });
  }
  };
- var isReasoningModel = (model) => ["o1-mini" /* O1Mini */, "o1" /* O1 */, "o3-mini" /* O3Mini */].includes(
- model
- );
  var AxAIOpenAI = class extends AxAIOpenAIBase {
  constructor({
  apiKey,
@@ -2714,7 +2863,7 @@ var AxAIDeepSeek = class extends AxAIOpenAIBase {

  // ai/google-gemini/types.ts
  var AxAIGoogleGeminiModel = /* @__PURE__ */ ((AxAIGoogleGeminiModel2) => {
- AxAIGoogleGeminiModel2["Gemini25Pro"] = "gemini-2.5-pro-preview-03-25";
+ AxAIGoogleGeminiModel2["Gemini25Pro"] = "gemini-2.5-pro-preview-05-06";
  AxAIGoogleGeminiModel2["Gemini25Flash"] = "gemini-2.5-flash-preview-04-17";
  AxAIGoogleGeminiModel2["Gemini20Flash"] = "gemini-2.0-flash";
  AxAIGoogleGeminiModel2["Gemini20FlashLite"] = "gemini-2.0-flash-lite-preview-02-05";
@@ -2763,7 +2912,7 @@ var AxAIGoogleGeminiEmbedTypes = /* @__PURE__ */ ((AxAIGoogleGeminiEmbedTypes2)
  // ai/google-gemini/info.ts
  var axModelInfoGoogleGemini = [
  {
- name: "gemini-2.5-pro-preview-03-25" /* Gemini25Pro */,
+ name: "gemini-2.5-pro-preview-05-06" /* Gemini25Pro */,
  currency: "usd",
  characterIsToken: false,
  promptTokenCostPer1M: 2.5,
@@ -2847,7 +2996,7 @@ var safetySettings = [
  }
  ];
  var axAIGoogleGeminiDefaultConfig = () => structuredClone({
- model: "gemini-2.0-flash" /* Gemini20Flash */,
+ model: "gemini-2.5-flash-preview-04-17" /* Gemini25Flash */,
  embedModel: "text-embedding-005" /* TextEmbedding005 */,
  safetySettings,
  ...axBaseAIDefaultConfig()
@@ -2888,7 +3037,7 @@ var AxAIGoogleGeminiImpl = class {
  n: config.n
  };
  }
- createChatReq = (req) => {
+ createChatReq = (req, config) => {
  const model = req.model;
  const stream = req.modelConfig?.stream ?? this.config.stream;
  if (!req.chatPrompt || req.chatPrompt.length === 0) {
@@ -2999,6 +3148,9 @@ var AxAIGoogleGeminiImpl = class {
  }
  });
  }
+ if (this.options?.urlContext) {
+ tools.push({ url_context: {} });
+ }
  if (tools.length === 0) {
  tools = void 0;
  }
@@ -3024,6 +3176,29 @@ var AxAIGoogleGeminiImpl = class {
  } else if (tools && tools.length > 0) {
  toolConfig = { function_calling_config: { mode: "AUTO" } };
  }
+ const thinkingConfig = {};
+ if (this.config.thinking?.includeThoughts) {
+ thinkingConfig.includeThoughts = true;
+ }
+ if (this.config.thinking?.thinkingTokenBudget) {
+ thinkingConfig.thinkingBudget = this.config.thinking.thinkingTokenBudget;
+ }
+ if (config.thinkingTokenBudget) {
+ switch (config.thinkingTokenBudget) {
+ case "minimal":
+ thinkingConfig.thinkingBudget = 0;
+ break;
+ case "low":
+ thinkingConfig.thinkingBudget = 1024;
+ break;
+ case "medium":
+ thinkingConfig.thinkingBudget = 4096;
+ break;
+ case "high":
+ thinkingConfig.thinkingBudget = 8192;
+ break;
+ }
+ }
  const generationConfig = {
  maxOutputTokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
  temperature: req.modelConfig?.temperature ?? this.config.temperature,
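Note: for Gemini the same thinkingTokenBudget levels map to concrete thinkingBudget token counts (minimal -> 0, low -> 1024, medium -> 4096, high -> 8192), and includeThoughts can be set via the provider config. A minimal sketch, assuming the config shape read above:

  const ai = new AxAIGoogleGemini({
    apiKey: process.env.GOOGLE_APIKEY,
    config: { thinking: { includeThoughts: true } },
  });
  const res = await ai.chat(req, { thinkingTokenBudget: "medium" }); // thinkingBudget: 4096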
@@ -3033,9 +3208,7 @@ var AxAIGoogleGeminiImpl = class {
  candidateCount: 1,
  stopSequences: req.modelConfig?.stopSequences ?? this.config.stopSequences,
  responseMimeType: "text/plain",
- ...this.config.thinkingConfig && {
- thinkingConfig: this.config.thinkingConfig
- }
+ ...thinkingConfig ? { thinkingConfig } : {}
  };
  const safetySettings2 = this.config.safetySettings;
  const reqValue = {
@@ -3116,7 +3289,11 @@ var AxAIGoogleGeminiImpl = class {
  }
  for (const part of candidate.content.parts) {
  if ("text" in part) {
- result.content = part.text;
+ if ("thought" in part && part.thought) {
+ result.thought = part.text;
+ } else {
+ result.content = part.text;
+ }
  continue;
  }
  if ("functionCall" in part) {
@@ -3139,7 +3316,8 @@ var AxAIGoogleGeminiImpl = class {
  this.tokensUsed = {
  totalTokens: resp.usageMetadata.totalTokenCount,
  promptTokens: resp.usageMetadata.promptTokenCount,
- completionTokens: resp.usageMetadata.candidatesTokenCount
+ completionTokens: resp.usageMetadata.candidatesTokenCount,
+ thoughtsTokens: resp.usageMetadata.thoughtsTokenCount
  };
  }
  return { results };
@@ -3985,9 +4163,85 @@ var AxAI = class {
  }
  };

+ // ai/x-grok/types.ts
+ var AxAIGrokModel = /* @__PURE__ */ ((AxAIGrokModel2) => {
+ AxAIGrokModel2["Grok3"] = "grok-3";
+ AxAIGrokModel2["Grok3Mini"] = "grok-3-mini";
+ AxAIGrokModel2["Grok3Fast"] = "grok-3-fast";
+ AxAIGrokModel2["Grok3MiniFast"] = "grok-3-mini-fast";
+ return AxAIGrokModel2;
+ })(AxAIGrokModel || {});
+ var AxAIGrokEmbedModels = /* @__PURE__ */ ((AxAIGrokEmbedModels3) => {
+ AxAIGrokEmbedModels3["GrokEmbedSmall"] = "grok-embed-small";
+ return AxAIGrokEmbedModels3;
+ })(AxAIGrokEmbedModels || {});
+
+ // ai/x-grok/info.ts
+ var axModelInfoGrok = [
+ {
+ name: "grok-3" /* Grok3 */,
+ currency: "USD",
+ promptTokenCostPer1M: 3,
+ completionTokenCostPer1M: 15
+ },
+ {
+ name: "grok-3-mini" /* Grok3Mini */,
+ currency: "USD",
+ promptTokenCostPer1M: 0.3,
+ completionTokenCostPer1M: 0.5
+ },
+ {
+ name: "grok-3-fast" /* Grok3Fast */,
+ currency: "USD",
+ promptTokenCostPer1M: 5,
+ completionTokenCostPer1M: 25
+ },
+ {
+ name: "grok-3-mini-fast" /* Grok3MiniFast */,
+ currency: "USD",
+ promptTokenCostPer1M: 0.6,
+ completionTokenCostPer1M: 4
+ }
+ ];
+
+ // ai/x-grok/api.ts
+ var axAIGrokDefaultConfig = () => structuredClone({
+ model: "grok-3-mini" /* Grok3Mini */,
+ ...axBaseAIDefaultConfig()
+ });
+ var axAIGrokBestConfig = () => structuredClone({
+ ...axAIGrokDefaultConfig(),
+ model: "grok-3" /* Grok3 */
+ });
+ var AxAIGrok = class extends AxAIOpenAIBase {
+ constructor({
+ apiKey,
+ config,
+ options,
+ models
+ }) {
+ if (!apiKey || apiKey === "") {
+ throw new Error("Grok API key not set");
+ }
+ const _config = {
+ ...axAIGrokDefaultConfig(),
+ ...config
+ };
+ super({
+ apiKey,
+ config: _config,
+ options,
+ apiURL: "https://api.x.ai/v1",
+ modelInfo: axModelInfoGrok,
+ models
+ });
+ super.setName("Grok");
+ }
+ };
+
  // dsp/generate.ts
  var import_web5 = require("stream/web");
- var import_api21 = require("@opentelemetry/api");
+ var import_api22 = require("@opentelemetry/api");

  // ai/util.ts
  function mergeFunctionCalls(functionCalls, functionCallDeltas) {
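Note: AxAIGrok is a thin OpenAI-compatible wrapper pointed at https://api.x.ai/v1, defaulting to grok-3-mini. A minimal sketch (the environment variable name is a placeholder):

  const ai = new AxAIGrok({ apiKey: process.env.GROK_APIKEY });
  const res = await ai.chat({
    chatPrompt: [{ role: "user", content: "Hello, Grok!" }],
  });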
@@ -4730,6 +4984,14 @@ var AxSignature = class _AxSignature {
  };
  hash = () => this.sigHash;
  toString = () => this.sigString;
+ toJSON = () => {
+ return {
+ id: this.hash(),
+ description: this.description,
+ inputFields: this.inputFields,
+ outputFields: this.outputFields
+ };
+ };
  };
  function renderField(field) {
  let result = field.name;
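Note: AxSignature.toJSON() backs the richer trace attributes added further down; generation spans now record the serialized signature instead of its string form. The shape it returns:

  sig.toJSON();
  // => { id: "<signature hash>", description, inputFields, outputFields }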
@@ -6270,7 +6532,7 @@ var parseFunctions = (newFuncs, existingFuncs) => {
  }
  return [...existingFuncs ?? [], ...functions];
  };
- var processFunctions = async (ai, functionList, functionCalls, mem, sessionId, traceId) => {
+ var processFunctions = async (ai, functionList, functionCalls, mem, sessionId, traceId, span, excludeContentFromTelemetry) => {
  const funcProc = new AxFunctionProcessor(functionList);
  const functionsExecuted = /* @__PURE__ */ new Set();
  const promises = functionCalls.map((func) => {
@@ -6279,6 +6541,16 @@ var processFunctions = async (ai, functionList, functionCalls, mem, sessionId, t
  }
  const promise = funcProc.execute(func, { sessionId, traceId, ai }).then((functionResult) => {
  functionsExecuted.add(func.name.toLowerCase());
+ if (span) {
+ const eventData = {
+ name: func.name
+ };
+ if (!excludeContentFromTelemetry) {
+ eventData.args = func.args;
+ eventData.result = functionResult ?? "";
+ }
+ span.addEvent("function.call", eventData);
+ }
  return {
  role: "function",
  result: functionResult ?? "",
@@ -6287,6 +6559,17 @@ var processFunctions = async (ai, functionList, functionCalls, mem, sessionId, t
  }).catch((e) => {
  if (e instanceof FunctionError) {
  const result = e.getFixingInstructions();
+ if (span) {
+ const errorEventData = {
+ name: func.name,
+ message: e.toString()
+ };
+ if (!excludeContentFromTelemetry) {
+ errorEventData.args = func.args;
+ errorEventData.fixing_instructions = result;
+ }
+ span.addEvent("function.error", errorEventData);
+ }
  mem.add(
  {
  role: "function",
@@ -6344,6 +6627,8 @@ var AxGen = class extends AxProgramWithSignature {
  functionsExecuted = /* @__PURE__ */ new Set();
  fieldProcessors = [];
  streamingFieldProcessors = [];
+ values = {};
+ excludeContentFromTrace = false;
  constructor(signature, options) {
  super(signature, { description: options?.description });
  this.options = options;
@@ -6353,6 +6638,7 @@ var AxGen = class extends AxProgramWithSignature {
  );
  this.asserts = this.options?.asserts ?? [];
  this.streamingAsserts = this.options?.streamingAsserts ?? [];
+ this.excludeContentFromTrace = options?.excludeContentFromTrace ?? false;
  this.usage = [];
  if (options?.functions) {
  this.functions = parseFunctions(options.functions);
@@ -6401,7 +6687,8 @@ var AxGen = class extends AxProgramWithSignature {
  rateLimiter,
  stream,
  functions: _functions,
- functionCall: _functionCall
+ functionCall: _functionCall,
+ thinkingTokenBudget
  } = options ?? {};
  const chatPrompt = mem?.history(sessionId) ?? [];
  if (chatPrompt.length === 0) {
@@ -6422,7 +6709,8 @@ var AxGen = class extends AxProgramWithSignature {
  traceId,
  rateLimiter,
  stream,
- debug: false
+ debug: false,
+ thinkingTokenBudget
  }
  );
  return res;
@@ -6430,7 +6718,8 @@ var AxGen = class extends AxProgramWithSignature {
  async *forwardCore({
  ai,
  mem,
- options
+ options,
+ span
  }) {
  const { sessionId, traceId, functions: _functions } = options ?? {};
  const fastFail = options?.fastFail ?? this.options?.fastFail;
@@ -6450,7 +6739,8 @@ var AxGen = class extends AxProgramWithSignature {
  traceId,
  sessionId,
  functions,
- fastFail
+ fastFail,
+ span
  });
  } else {
  yield await this.processResponse({
@@ -6460,7 +6750,8 @@ var AxGen = class extends AxProgramWithSignature {
  mem,
  traceId,
  sessionId,
- functions
+ functions,
+ span
  });
  }
  }
@@ -6472,11 +6763,12 @@ var AxGen = class extends AxProgramWithSignature {
  sessionId,
  traceId,
  functions,
- fastFail
+ fastFail,
+ span
  }) {
  const streamingValidation = fastFail ?? ai.getFeatures(model).functionCot !== true;
  const functionCalls = [];
- const values = {};
+ this.values = {};
  const xstate = {
  extractedFields: [],
  streamedIndex: {},
@@ -6503,6 +6795,9 @@
  sessionId
  );
  } else if (result.content) {
+ if (result.thought && result.thought.length > 0) {
+ yield { thought: result.thought };
+ }
  content += result.content;
  mem.updateResult(
  { name: result.name, content, delta: result.content },
@@ -6510,7 +6805,7 @@
  );
  const skip = streamingExtractValues(
  this.signature,
- values,
+ this.values,
  xstate,
  content,
  streamingValidation
@@ -6531,12 +6826,20 @@
  content,
  xstate,
  mem,
- values,
+ this.values,
  sessionId
  );
  }
- yield* streamValues(this.signature, content, values, xstate);
- await assertAssertions(this.asserts, values);
+ yield* streamValues(
+ this.signature,
+ content,
+ this.values,
+ xstate
+ );
+ await assertAssertions(this.asserts, this.values);
+ } else if (result.thought && result.thought.length > 0) {
+ this.values.thought = this.values.thought ?? "" + result.thought;
+ yield { thought: result.thought };
  }
  if (result.finishReason === "length") {
  throw new Error(
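Note: AxGen now surfaces provider reasoning as a thought delta alongside regular field deltas. A minimal sketch of consuming it, assuming a streaming entry point that yields { delta } chunks as in _forward2 below (the signature is illustrative):

  const gen = new AxGen("question -> answer");
  for await (const { delta } of gen.streamingForward(ai, { question: "Why is the sky blue?" })) {
    if (delta.thought) process.stdout.write(delta.thought);
  }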
@@ -6545,7 +6848,7 @@ Content: ${content}`
  );
  }
  }
- const funcs = parseFunctionCalls(ai, functionCalls, values, model);
+ const funcs = parseFunctionCalls(ai, functionCalls, this.values, model);
  if (funcs) {
  if (!functions) {
  throw new Error("Functions are not defined");
@@ -6556,22 +6859,24 @@ Content: ${content}`
  funcs,
  mem,
  sessionId,
- traceId
+ traceId,
+ span,
+ this.excludeContentFromTrace
  );
  this.functionsExecuted = /* @__PURE__ */ new Set([...this.functionsExecuted, ...fx]);
  } else {
- streamingExtractFinalValue(this.signature, values, xstate, content);
+ streamingExtractFinalValue(this.signature, this.values, xstate, content);
  await assertStreamingAssertions(
  this.streamingAsserts,
  xstate,
  content,
  true
  );
- await assertAssertions(this.asserts, values);
+ await assertAssertions(this.asserts, this.values);
  if (this.fieldProcessors.length) {
  await processFieldProcessors(
  this.fieldProcessors,
- values,
+ this.values,
  mem,
  sessionId
  );
@@ -6582,12 +6887,17 @@ Content: ${content}`
  content,
  xstate,
  mem,
- values,
+ this.values,
  sessionId,
  true
  );
  }
- yield* streamValues(this.signature, content, values, xstate);
+ yield* streamValues(
+ this.signature,
+ content,
+ this.values,
+ xstate
+ );
  }
  }
  async processResponse({
@@ -6596,9 +6906,10 @@ Content: ${content}`
  mem,
  sessionId,
  traceId,
- functions
+ functions,
+ span
  }) {
- const values = {};
+ this.values = {};
  let results = res.results ?? [];
  if (results.length > 1) {
  results = results.filter((r) => r.functionCalls);
@@ -6609,7 +6920,7 @@ Content: ${content}`
  }
  mem.addResult(result, sessionId);
  if (result.functionCalls?.length) {
- const funcs = parseFunctionCalls(ai, result.functionCalls, values);
+ const funcs = parseFunctionCalls(ai, result.functionCalls, this.values);
  if (funcs) {
  if (!functions) {
  throw new Error("Functions are not defined");
@@ -6620,17 +6931,22 @@ Content: ${content}`
  funcs,
  mem,
  sessionId,
- traceId
+ traceId,
+ span,
+ this.excludeContentFromTrace
  );
  this.functionsExecuted = /* @__PURE__ */ new Set([...this.functionsExecuted, ...fx]);
  }
  } else if (result.content) {
- extractValues(this.signature, values, result.content);
- await assertAssertions(this.asserts, values);
+ if (result.thought && result.thought.length > 0) {
+ this.values.thought = result.thought;
+ }
+ extractValues(this.signature, this.values, result.content);
+ await assertAssertions(this.asserts, this.values);
  if (this.fieldProcessors.length) {
  await processFieldProcessors(
  this.fieldProcessors,
- values,
+ this.values,
  mem,
  sessionId
  );
@@ -6643,13 +6959,12 @@ Content: ${result.content}`
  );
  }
  }
- const publicValues = { ...values };
  for (const field of this.signature.getOutputFields()) {
  if (field.isInternal) {
- delete publicValues[field.name];
+ delete this.values[field.name];
  }
  }
- return { ...values };
+ return { ...this.values };
  }
  async *_forward2(ai, values, options, span) {
  const stopFunction = (options?.stopFunction ?? this.options?.stopFunction)?.toLowerCase();
@@ -6675,7 +6990,7 @@ Content: ${result.content}`
  multiStepLoop: for (let n = 0; n < maxSteps; n++) {
  for (let errCount = 0; errCount < maxRetries; errCount++) {
  try {
- const generator = this.forwardCore({ options, ai, mem });
+ const generator = this.forwardCore({ options, ai, mem, span });
  for await (const delta of generator) {
  if (delta !== void 0) {
  yield { version: errCount, delta };
@@ -6699,10 +7014,22 @@ Content: ${result.content}`
  if (e instanceof ValidationError) {
  errorFields = e.getFixingInstructions();
  err = e;
+ if (span) {
+ span.addEvent("validation.error", {
+ message: e.toString(),
+ fixing_instructions: errorFields?.map((f) => f.title).join(", ") ?? ""
+ });
+ }
  } else if (e instanceof AxAssertionError) {
  const e1 = e;
  errorFields = e1.getFixingInstructions();
  err = e;
+ if (span) {
+ span.addEvent("assertion.error", {
+ message: e1.toString(),
+ fixing_instructions: errorFields?.map((f) => f.title).join(", ") ?? ""
+ });
+ }
  } else if (e instanceof AxAIServiceStreamTerminatedError) {
  } else {
  throw enhanceError(e, ai, this.signature);
@@ -6757,14 +7084,25 @@ Content: ${result.content}`
  }
  const funcNames = functions?.map((f) => f.name).join(",");
  const attributes = {
- "generate.signature": this.signature.toString(),
- "generate.functions": funcNames ?? ""
+ signature: JSON.stringify(this.signature.toJSON(), null, 2),
+ ...this.examples ? { examples: JSON.stringify(this.examples, null, 2) } : {},
+ ...funcNames ? { provided_functions: funcNames } : {},
+ ...options?.model ? { model: options.model } : {},
+ ...options?.thinkingTokenBudget ? { thinking_token_budget: options.thinkingTokenBudget } : {},
+ ...options?.maxSteps ? { max_steps: options.maxSteps } : {},
+ ...options?.maxRetries ? { max_retries: options.maxRetries } : {},
+ ...options?.fastFail ? { fast_fail: options.fastFail } : {}
  };
- const span = tracer.startSpan("Generate", {
- kind: import_api21.SpanKind.SERVER,
+ const traceLabel = options.traceLabel ?? this.options?.traceLabel;
+ const spanName = traceLabel ? `${traceLabel} (AxGen)` : "AxGen";
+ const span = tracer.startSpan(spanName, {
+ kind: import_api22.SpanKind.SERVER,
  attributes
  });
  try {
+ if (!this.excludeContentFromTrace) {
+ span.addEvent("input", { content: JSON.stringify(values, null, 2) });
+ }
  yield* this._forward2(
  ai,
  values,
@@ -6774,6 +7112,11 @@ Content: ${result.content}`
  },
  span
  );
+ if (!this.excludeContentFromTrace) {
+ span.addEvent("output", {
+ content: JSON.stringify(this.values, null, 2)
+ });
+ }
  } finally {
  span.end();
  }
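Note: generation spans are now named "AxGen" (or "<traceLabel> (AxGen)"), carry the serialized signature plus the effective options as attributes, and record input/output events unless content is excluded. A minimal sketch, assuming a tracer is supplied through the forward options:

  const gen = new AxGen("question -> answer", {
    traceLabel: "Support Bot", // span name becomes "Support Bot (AxGen)"
    excludeContentFromTrace: true, // no input/output content events
  });
  const out = await gen.forward(ai, { question: "Hi" }, { tracer });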
@@ -7628,7 +7971,7 @@ var randomSample = (array, n) => {
  };

  // db/base.ts
- var import_api22 = require("@opentelemetry/api");
+ var import_api23 = require("@opentelemetry/api");
  var AxDBBase = class {
  name;
  fetch;
@@ -7655,7 +7998,7 @@ var AxDBBase = class {
  return await this.tracer?.startActiveSpan(
  "DB Upsert Request",
  {
- kind: import_api22.SpanKind.SERVER,
+ kind: import_api23.SpanKind.SERVER,
  attributes: {
  [axSpanAttributes.DB_SYSTEM]: this.name,
  [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -7689,7 +8032,7 @@ var AxDBBase = class {
  return await this.tracer?.startActiveSpan(
  "DB Batch Upsert Request",
  {
- kind: import_api22.SpanKind.SERVER,
+ kind: import_api23.SpanKind.SERVER,
  attributes: {
  [axSpanAttributes.DB_SYSTEM]: this.name,
  [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -7717,7 +8060,7 @@ var AxDBBase = class {
  return await this.tracer?.startActiveSpan(
  "DB Query Request",
  {
- kind: import_api22.SpanKind.SERVER,
+ kind: import_api23.SpanKind.SERVER,
  attributes: {
  [axSpanAttributes.DB_SYSTEM]: this.name,
  [axSpanAttributes.DB_OPERATION_NAME]: "upsert",
@@ -11531,6 +11874,9 @@ var AxRAG = class extends AxChainOfThought {
  AxAIGoogleGeminiModel,
  AxAIGoogleGeminiSafetyCategory,
  AxAIGoogleGeminiSafetyThreshold,
+ AxAIGrok,
+ AxAIGrokEmbedModels,
+ AxAIGrokModel,
  AxAIGroq,
  AxAIGroqModel,
  AxAIHuggingFace,
@@ -11611,6 +11957,8 @@ var AxRAG = class extends AxChainOfThought {
  axAIDeepSeekDefaultConfig,
  axAIGoogleGeminiDefaultConfig,
  axAIGoogleGeminiDefaultCreativeConfig,
+ axAIGrokBestConfig,
+ axAIGrokDefaultConfig,
  axAIHuggingFaceCreativeConfig,
  axAIHuggingFaceDefaultConfig,
  axAIMistralBestConfig,
@@ -11632,6 +11980,7 @@ var AxRAG = class extends AxChainOfThought {
  axModelInfoCohere,
  axModelInfoDeepSeek,
  axModelInfoGoogleGemini,
+ axModelInfoGrok,
  axModelInfoGroq,
  axModelInfoHuggingFace,
  axModelInfoMistral,