@ax-llm/ax 11.0.42 → 11.0.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.cts CHANGED
@@ -65,6 +65,7 @@ interface AxAIFeatures {
  functions: boolean;
  streaming: boolean;
  functionCot?: boolean;
+ thinkingTokenBudget?: boolean;
  }
  interface AxBaseAIArgs<TModel, TEmbedModel> {
  name: string;
@@ -88,6 +89,7 @@ declare class AxBaseAI<TModel, TEmbedModel, TChatRequest, TEmbedRequest, TChatRe
  private fetch?;
  private tracer?;
  private timeout?;
+ private excludeContentFromTrace?;
  private models?;
  private modelInfo;
  private modelUsage?;
@@ -160,6 +162,7 @@ type AxTokenUsage = {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
+ thoughtsTokens?: number;
  };
  type AxModelConfig = {
  maxTokens?: number;
@@ -196,6 +199,7 @@ type AxFunction = {
  };
  type AxChatResponseResult = {
  content?: string;
+ thought?: string;
  name?: string;
  id?: string;
  functionCalls?: {
@@ -325,6 +329,7 @@ type AxRateLimiterFunction = <T = unknown>(reqFunc: () => Promise<T | ReadableSt
  }>) => Promise<T | ReadableStream$1<T>>;
  type AxAIPromptConfig = {
  stream?: boolean;
+ thinkingTokenBudget?: number;
  };
  type AxAIServiceOptions = {
  debug?: boolean;
@@ -332,6 +337,7 @@ type AxAIServiceOptions = {
  fetch?: typeof fetch;
  tracer?: Tracer;
  timeout?: number;
+ excludeContentFromTrace?: boolean;
  };
  type AxAIServiceActionOptions<TModel = unknown, TEmbedModel = unknown> = {
  ai?: Readonly<AxAIService<TModel, TEmbedModel>>;
@@ -926,7 +932,7 @@ declare class AxAIDeepSeek extends AxAIOpenAIBase<AxAIDeepSeekModel, undefined>
  }

  declare enum AxAIGoogleGeminiModel {
- Gemini25Pro = "gemini-2.5-pro-preview-03-25",
+ Gemini25Pro = "gemini-2.5-pro-preview-05-06",
  Gemini25Flash = "gemini-2.5-flash-preview-04-17",
  Gemini20Flash = "gemini-2.0-flash",
  Gemini20FlashLite = "gemini-2.0-flash-lite-preview-02-05",
@@ -970,6 +976,7 @@ type AxAIGoogleGeminiContent = {
  role: 'user';
  parts: ({
  text: string;
+ thought?: string;
  } | {
  inlineData: {
  mimeType: string;
@@ -1015,6 +1022,7 @@ type AxAIGoogleGeminiTool = {
  function_declarations?: AxAIGoogleGeminiToolFunctionDeclaration[];
  code_execution?: object;
  google_search_retrieval?: AxAIGoogleGeminiToolGoogleSearchRetrieval;
+ url_context?: object;
  };
  type AxAIGoogleGeminiToolConfig = {
  function_calling_config: {
@@ -1026,9 +1034,15 @@ type AxAIGoogleGeminiGenerationConfig = {
  temperature?: number;
  topP?: number;
  topK?: number;
+ frequencyPenalty?: number;
  candidateCount?: number;
  maxOutputTokens?: number;
  stopSequences?: readonly string[];
+ responseMimeType?: string;
+ thinkingConfig?: {
+ thinkingBudget?: number;
+ includeThoughts?: boolean;
+ };
  };
  type AxAIGoogleGeminiSafetySettings = {
  category: AxAIGoogleGeminiSafetyCategory;
@@ -1065,11 +1079,13 @@ type AxAIGoogleGeminiChatResponse = {
  promptTokenCount: number;
  candidatesTokenCount: number;
  totalTokenCount: number;
+ thoughtsTokenCount: number;
  };
  };
  type AxAIGoogleGeminiChatResponseDelta = AxAIGoogleGeminiChatResponse;
  type AxAIGoogleGeminiThinkingConfig = {
- thinkingBudget: number;
+ thinkingTokenBudget?: number;
+ includeThoughts?: boolean;
  };
  /**
  * AxAIGoogleGeminiConfig: Configuration options for Google Gemini API
@@ -1081,7 +1097,8 @@ type AxAIGoogleGeminiConfig = AxModelConfig & {
  embedType?: AxAIGoogleGeminiEmbedTypes;
  dimensions?: number;
  autoTruncate?: boolean;
- thinkingConfig?: AxAIGoogleGeminiThinkingConfig;
+ thinking?: AxAIGoogleGeminiThinkingConfig;
+ urlContext?: string;
  };
  /**
  * AxAIGoogleGeminiEmbedRequest: Structure for making an embedding request to the Google Gemini API.
@@ -1140,6 +1157,7 @@ interface AxAIGoogleGeminiOptionsTools {
  dynamicThreshold?: number;
  };
  googleSearch?: boolean;
+ urlContext?: boolean;
  }
  interface AxAIGoogleGeminiArgs {
  name: 'google-gemini';
@@ -1567,6 +1585,12 @@ declare class AxSignature {
  private updateHash;
  hash: () => string;
  toString: () => string;
+ toJSON: () => {
+ id: string;
+ description: string | undefined;
+ inputFields: AxIField[];
+ outputFields: AxIField[];
+ };
  }

  type AxFieldValue = string | string[] | number | boolean | object | null | undefined | {
@@ -1613,6 +1637,8 @@ type AxProgramForwardOptions = {
  fastFail?: boolean;
  debug?: boolean;
  debugHideSystemPrompt?: boolean;
+ thinkingTokenBudget?: number;
+ traceLabel?: string;
  };
  type AxProgramStreamingForwardOptions = Omit<AxProgramForwardOptions, 'stream'>;
  type AxGenDeltaOut<OUT> = {
@@ -1777,9 +1803,11 @@ interface AxGenOptions {
  asserts?: AxAssertion[];
  streamingAsserts?: AxStreamingAssertion[];
  fastFail?: boolean;
+ excludeContentFromTrace?: boolean;
+ traceLabel?: string;
  }
  type AxGenerateResult<OUT extends AxGenOut> = OUT & {
- functions?: AxChatResponseFunctionCall[];
+ thought?: string;
  };
  interface AxResponseHandlerArgs<T> {
  ai: Readonly<AxAIService>;
@@ -1790,6 +1818,7 @@ interface AxResponseHandlerArgs<T> {
  traceId?: string;
  functions?: Readonly<AxFunction[]>;
  fastFail?: boolean;
+ span?: Span;
  }
  interface AxStreamingEvent<T> {
  event: 'delta' | 'done' | 'error';
@@ -1809,6 +1838,8 @@ declare class AxGen<IN extends AxGenIn = AxGenIn, OUT extends AxGenerateResult<A
  private functionsExecuted;
  private fieldProcessors;
  private streamingFieldProcessors;
+ private values;
+ private excludeContentFromTrace;
  constructor(signature: Readonly<AxSignature | string>, options?: Readonly<AxGenOptions>);
  addAssert: (fn: AxAssertion["fn"], message?: string) => void;
  addStreamingAssert: (fieldName: string, fn: AxStreamingAssertion["fn"], message?: string) => void;
@@ -2382,6 +2413,7 @@ declare class AxJSInterpreter {

  declare const axSpanAttributes: {
  LLM_SYSTEM: string;
+ LLM_OPERATION_NAME: string;
  LLM_REQUEST_MODEL: string;
  LLM_REQUEST_MAX_TOKENS: string;
  LLM_REQUEST_TEMPERATURE: string;
@@ -2391,8 +2423,10 @@ declare const axSpanAttributes: {
  LLM_REQUEST_STOP_SEQUENCES: string;
  LLM_REQUEST_LLM_IS_STREAMING: string;
  LLM_REQUEST_TOP_P: string;
- LLM_USAGE_PROMPT_TOKENS: string;
- LLM_USAGE_COMPLETION_TOKENS: string;
+ LLM_USAGE_INPUT_TOKENS: string;
+ LLM_USAGE_OUTPUT_TOKENS: string;
+ LLM_USAGE_TOTAL_TOKENS: string;
+ LLM_USAGE_THOUGHTS_TOKENS: string;
  DB_SYSTEM: string;
  DB_TABLE: string;
  DB_NAMESPACE: string;
@@ -2412,7 +2446,12 @@ declare const axSpanAttributes: {
  DB_QUERY_RESULT_DOCUMENT: string;
  };
  declare const axSpanEvents: {
- LLM_PROMPT: string;
+ GEN_AI_USER_MESSAGE: string;
+ GEN_AI_SYSTEM_MESSAGE: string;
+ GEN_AI_ASSISTANT_MESSAGE: string;
+ GEN_AI_TOOL_MESSAGE: string;
+ GEN_AI_CHOICE: string;
+ GEN_AI_USAGE: string;
  };
  declare enum AxLLMRequestTypeValues {
  COMPLETION = "completion",
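Taken together, the declaration changes wire Gemini "thinking" support through the whole stack: a per-call thinkingTokenBudget, an optional thought field on results, thoughtsTokens in usage, and an excludeContentFromTrace switch plus traceLabel for tracing. Below is a minimal sketch of how the new options might be passed, assuming the AxAI/AxGen entry points and the forward() signature behave as in earlier 11.x releases; the model, prompt, and environment variable names are placeholders.

import { AxAI, AxGen } from '@ax-llm/ax'

// Assumption: constructor and forward() shapes carry over from 11.0.42;
// only the option names below come from the declarations in this diff.
const llm = new AxAI({
  name: 'google-gemini',
  apiKey: process.env.GOOGLE_APIKEY!,
  options: { excludeContentFromTrace: true }, // new AxAIServiceOptions flag
})

const gen = new AxGen('question -> answer', {
  traceLabel: 'qa-demo',            // new AxGenOptions field
  excludeContentFromTrace: true,    // new AxGenOptions field
})

const res = await gen.forward(llm, { question: 'Why is the sky blue?' }, {
  thinkingTokenBudget: 1024,        // new AxProgramForwardOptions field
})

console.log(res.answer)
console.log(res.thought) // `thought` replaces `functions` on AxGenerateResult
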
package/index.d.ts CHANGED
@@ -65,6 +65,7 @@ interface AxAIFeatures {
  functions: boolean;
  streaming: boolean;
  functionCot?: boolean;
+ thinkingTokenBudget?: boolean;
  }
  interface AxBaseAIArgs<TModel, TEmbedModel> {
  name: string;
@@ -88,6 +89,7 @@ declare class AxBaseAI<TModel, TEmbedModel, TChatRequest, TEmbedRequest, TChatRe
  private fetch?;
  private tracer?;
  private timeout?;
+ private excludeContentFromTrace?;
  private models?;
  private modelInfo;
  private modelUsage?;
@@ -160,6 +162,7 @@ type AxTokenUsage = {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
+ thoughtsTokens?: number;
  };
  type AxModelConfig = {
  maxTokens?: number;
@@ -196,6 +199,7 @@ type AxFunction = {
  };
  type AxChatResponseResult = {
  content?: string;
+ thought?: string;
  name?: string;
  id?: string;
  functionCalls?: {
@@ -325,6 +329,7 @@ type AxRateLimiterFunction = <T = unknown>(reqFunc: () => Promise<T | ReadableSt
  }>) => Promise<T | ReadableStream$1<T>>;
  type AxAIPromptConfig = {
  stream?: boolean;
+ thinkingTokenBudget?: number;
  };
  type AxAIServiceOptions = {
  debug?: boolean;
@@ -332,6 +337,7 @@ type AxAIServiceOptions = {
  fetch?: typeof fetch;
  tracer?: Tracer;
  timeout?: number;
+ excludeContentFromTrace?: boolean;
  };
  type AxAIServiceActionOptions<TModel = unknown, TEmbedModel = unknown> = {
  ai?: Readonly<AxAIService<TModel, TEmbedModel>>;
@@ -926,7 +932,7 @@ declare class AxAIDeepSeek extends AxAIOpenAIBase<AxAIDeepSeekModel, undefined>
  }

  declare enum AxAIGoogleGeminiModel {
- Gemini25Pro = "gemini-2.5-pro-preview-03-25",
+ Gemini25Pro = "gemini-2.5-pro-preview-05-06",
  Gemini25Flash = "gemini-2.5-flash-preview-04-17",
  Gemini20Flash = "gemini-2.0-flash",
  Gemini20FlashLite = "gemini-2.0-flash-lite-preview-02-05",
@@ -970,6 +976,7 @@ type AxAIGoogleGeminiContent = {
  role: 'user';
  parts: ({
  text: string;
+ thought?: string;
  } | {
  inlineData: {
  mimeType: string;
@@ -1015,6 +1022,7 @@ type AxAIGoogleGeminiTool = {
  function_declarations?: AxAIGoogleGeminiToolFunctionDeclaration[];
  code_execution?: object;
  google_search_retrieval?: AxAIGoogleGeminiToolGoogleSearchRetrieval;
+ url_context?: object;
  };
  type AxAIGoogleGeminiToolConfig = {
  function_calling_config: {
@@ -1026,9 +1034,15 @@ type AxAIGoogleGeminiGenerationConfig = {
  temperature?: number;
  topP?: number;
  topK?: number;
+ frequencyPenalty?: number;
  candidateCount?: number;
  maxOutputTokens?: number;
  stopSequences?: readonly string[];
+ responseMimeType?: string;
+ thinkingConfig?: {
+ thinkingBudget?: number;
+ includeThoughts?: boolean;
+ };
  };
  type AxAIGoogleGeminiSafetySettings = {
  category: AxAIGoogleGeminiSafetyCategory;
@@ -1065,11 +1079,13 @@ type AxAIGoogleGeminiChatResponse = {
  promptTokenCount: number;
  candidatesTokenCount: number;
  totalTokenCount: number;
+ thoughtsTokenCount: number;
  };
  };
  type AxAIGoogleGeminiChatResponseDelta = AxAIGoogleGeminiChatResponse;
  type AxAIGoogleGeminiThinkingConfig = {
- thinkingBudget: number;
+ thinkingTokenBudget?: number;
+ includeThoughts?: boolean;
  };
  /**
  * AxAIGoogleGeminiConfig: Configuration options for Google Gemini API
@@ -1081,7 +1097,8 @@ type AxAIGoogleGeminiConfig = AxModelConfig & {
  embedType?: AxAIGoogleGeminiEmbedTypes;
  dimensions?: number;
  autoTruncate?: boolean;
- thinkingConfig?: AxAIGoogleGeminiThinkingConfig;
+ thinking?: AxAIGoogleGeminiThinkingConfig;
+ urlContext?: string;
  };
  /**
  * AxAIGoogleGeminiEmbedRequest: Structure for making an embedding request to the Google Gemini API.
@@ -1140,6 +1157,7 @@ interface AxAIGoogleGeminiOptionsTools {
  dynamicThreshold?: number;
  };
  googleSearch?: boolean;
+ urlContext?: boolean;
  }
  interface AxAIGoogleGeminiArgs {
  name: 'google-gemini';
@@ -1567,6 +1585,12 @@ declare class AxSignature {
  private updateHash;
  hash: () => string;
  toString: () => string;
+ toJSON: () => {
+ id: string;
+ description: string | undefined;
+ inputFields: AxIField[];
+ outputFields: AxIField[];
+ };
  }

  type AxFieldValue = string | string[] | number | boolean | object | null | undefined | {
@@ -1613,6 +1637,8 @@ type AxProgramForwardOptions = {
  fastFail?: boolean;
  debug?: boolean;
  debugHideSystemPrompt?: boolean;
+ thinkingTokenBudget?: number;
+ traceLabel?: string;
  };
  type AxProgramStreamingForwardOptions = Omit<AxProgramForwardOptions, 'stream'>;
  type AxGenDeltaOut<OUT> = {
@@ -1777,9 +1803,11 @@ interface AxGenOptions {
  asserts?: AxAssertion[];
  streamingAsserts?: AxStreamingAssertion[];
  fastFail?: boolean;
+ excludeContentFromTrace?: boolean;
+ traceLabel?: string;
  }
  type AxGenerateResult<OUT extends AxGenOut> = OUT & {
- functions?: AxChatResponseFunctionCall[];
+ thought?: string;
  };
  interface AxResponseHandlerArgs<T> {
  ai: Readonly<AxAIService>;
@@ -1790,6 +1818,7 @@ interface AxResponseHandlerArgs<T> {
  traceId?: string;
  functions?: Readonly<AxFunction[]>;
  fastFail?: boolean;
+ span?: Span;
  }
  interface AxStreamingEvent<T> {
  event: 'delta' | 'done' | 'error';
@@ -1809,6 +1838,8 @@ declare class AxGen<IN extends AxGenIn = AxGenIn, OUT extends AxGenerateResult<A
  private functionsExecuted;
  private fieldProcessors;
  private streamingFieldProcessors;
+ private values;
+ private excludeContentFromTrace;
  constructor(signature: Readonly<AxSignature | string>, options?: Readonly<AxGenOptions>);
  addAssert: (fn: AxAssertion["fn"], message?: string) => void;
  addStreamingAssert: (fieldName: string, fn: AxStreamingAssertion["fn"], message?: string) => void;
@@ -2382,6 +2413,7 @@ declare class AxJSInterpreter {

  declare const axSpanAttributes: {
  LLM_SYSTEM: string;
+ LLM_OPERATION_NAME: string;
  LLM_REQUEST_MODEL: string;
  LLM_REQUEST_MAX_TOKENS: string;
  LLM_REQUEST_TEMPERATURE: string;
@@ -2391,8 +2423,10 @@ declare const axSpanAttributes: {
  LLM_REQUEST_STOP_SEQUENCES: string;
  LLM_REQUEST_LLM_IS_STREAMING: string;
  LLM_REQUEST_TOP_P: string;
- LLM_USAGE_PROMPT_TOKENS: string;
- LLM_USAGE_COMPLETION_TOKENS: string;
+ LLM_USAGE_INPUT_TOKENS: string;
+ LLM_USAGE_OUTPUT_TOKENS: string;
+ LLM_USAGE_TOTAL_TOKENS: string;
+ LLM_USAGE_THOUGHTS_TOKENS: string;
  DB_SYSTEM: string;
  DB_TABLE: string;
  DB_NAMESPACE: string;
@@ -2412,7 +2446,12 @@ declare const axSpanAttributes: {
  DB_QUERY_RESULT_DOCUMENT: string;
  };
  declare const axSpanEvents: {
- LLM_PROMPT: string;
+ GEN_AI_USER_MESSAGE: string;
+ GEN_AI_SYSTEM_MESSAGE: string;
+ GEN_AI_ASSISTANT_MESSAGE: string;
+ GEN_AI_TOOL_MESSAGE: string;
+ GEN_AI_CHOICE: string;
+ GEN_AI_USAGE: string;
  };
  declare enum AxLLMRequestTypeValues {
  COMPLETION = "completion",
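On the provider side, note the rename in AxAIGoogleGeminiConfig: thinkingConfig becomes thinking, and its required thinkingBudget becomes the optional pair thinkingTokenBudget / includeThoughts, which map onto the wire-level generationConfig.thinkingConfig shown above. A hedged sketch of the provider-level config under the new shape, assuming the AxAI constructor accepts a partial Gemini config as in earlier 11.x releases; the field placements marked as assumptions are not confirmed by this diff.

import { AxAI, AxAIGoogleGeminiModel } from '@ax-llm/ax'

// Assumption: `config` merges into AxAIGoogleGeminiConfig and `options` also accepts
// the AxAIGoogleGeminiOptionsTools flags; the field names themselves come from this diff.
const llm = new AxAI({
  name: 'google-gemini',
  apiKey: process.env.GOOGLE_APIKEY!, // placeholder
  config: {
    model: AxAIGoogleGeminiModel.Gemini25Flash,
    // Was `thinkingConfig: { thinkingBudget: number }` in 11.0.42.
    thinking: { thinkingTokenBudget: 2048, includeThoughts: true },
  },
  options: { urlContext: true }, // new url_context tool toggle (placement assumed)
})

// With includeThoughts set, Gemini responses may carry `thought` text parts, and the
// usage metadata's thoughtsTokenCount surfaces as AxTokenUsage.thoughtsTokens.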