@ax-llm/ax 11.0.58 → 11.0.59

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
package/index.d.cts CHANGED
@@ -346,6 +346,7 @@ type AxLoggerFunction = (message: string, options?: {
 type AxAIPromptConfig = {
     stream?: boolean;
     thinkingTokenBudget?: 'minimal' | 'low' | 'medium' | 'high' | 'highest' | 'none';
+    showThoughts?: boolean;
 };
 type AxAIServiceOptions = {
     debug?: boolean;
@@ -365,7 +366,6 @@ type AxAIServiceActionOptions<TModel = unknown, TEmbedModel = unknown> = {
     rateLimiter?: AxRateLimiterFunction;
     debug?: boolean;
     debugHideSystemPrompt?: boolean;
-    hideThought?: boolean;
     traceContext?: Context;
     abortSignal?: AbortSignal;
     logger?: AxLoggerFunction;
@@ -2259,6 +2259,7 @@ type AxProgramForwardOptions = {
     debug?: boolean;
     debugHideSystemPrompt?: boolean;
     thinkingTokenBudget?: 'minimal' | 'low' | 'medium' | 'high' | 'highest' | 'none';
+    showThoughts?: boolean;
     traceLabel?: string;
     abortSignal?: AbortSignal;
     logger?: AxLoggerFunction;
@@ -3456,7 +3457,7 @@ Readonly<AxAIOpenAIEmbedResponse>> {
     getModelConfig(): Readonly<AxModelConfig>;
     private mapInternalContentToResponsesInput;
     private createResponsesReqInternalInput;
-    createChatReq(req: Readonly<AxInternalChatRequest<TModel>>, _config: Readonly<AxAIPromptConfig>): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>];
+    createChatReq(req: Readonly<AxInternalChatRequest<TModel>>, config: Readonly<AxAIPromptConfig>): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>];
     createChatResp(resp: Readonly<AxAIOpenAIResponsesResponse>): Readonly<AxChatResponse>;
     createChatStreamResp(streamEvent: Readonly<AxAIOpenAIResponsesResponseDelta>): Readonly<AxChatResponse>;
     createEmbedReq(req: Readonly<AxInternalEmbedRequest<TEmbedModel>>): [AxAPI, AxAIOpenAIEmbedRequest<TEmbedModel>];
package/index.d.ts CHANGED
@@ -346,6 +346,7 @@ type AxLoggerFunction = (message: string, options?: {
 type AxAIPromptConfig = {
     stream?: boolean;
     thinkingTokenBudget?: 'minimal' | 'low' | 'medium' | 'high' | 'highest' | 'none';
+    showThoughts?: boolean;
 };
 type AxAIServiceOptions = {
     debug?: boolean;
@@ -365,7 +366,6 @@ type AxAIServiceActionOptions<TModel = unknown, TEmbedModel = unknown> = {
     rateLimiter?: AxRateLimiterFunction;
     debug?: boolean;
     debugHideSystemPrompt?: boolean;
-    hideThought?: boolean;
     traceContext?: Context;
     abortSignal?: AbortSignal;
     logger?: AxLoggerFunction;
@@ -2259,6 +2259,7 @@ type AxProgramForwardOptions = {
     debug?: boolean;
     debugHideSystemPrompt?: boolean;
     thinkingTokenBudget?: 'minimal' | 'low' | 'medium' | 'high' | 'highest' | 'none';
+    showThoughts?: boolean;
     traceLabel?: string;
     abortSignal?: AbortSignal;
     logger?: AxLoggerFunction;
@@ -3456,7 +3457,7 @@ Readonly<AxAIOpenAIEmbedResponse>> {
     getModelConfig(): Readonly<AxModelConfig>;
     private mapInternalContentToResponsesInput;
     private createResponsesReqInternalInput;
-    createChatReq(req: Readonly<AxInternalChatRequest<TModel>>, _config: Readonly<AxAIPromptConfig>): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>];
+    createChatReq(req: Readonly<AxInternalChatRequest<TModel>>, config: Readonly<AxAIPromptConfig>): [Readonly<AxAPI>, Readonly<AxAIOpenAIResponsesRequest<TModel>>];
     createChatResp(resp: Readonly<AxAIOpenAIResponsesResponse>): Readonly<AxChatResponse>;
     createChatStreamResp(streamEvent: Readonly<AxAIOpenAIResponsesResponseDelta>): Readonly<AxChatResponse>;
     createEmbedReq(req: Readonly<AxInternalEmbedRequest<TEmbedModel>>): [AxAPI, AxAIOpenAIEmbedRequest<TEmbedModel>];
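
Net effect of the declaration changes in both files: the response-side hideThought flag is removed from AxAIServiceActionOptions, and a request-side showThoughts opt-in is added to AxAIPromptConfig and AxProgramForwardOptions. A minimal migration sketch in TypeScript; the gen.forward(ai, values, options) call shape is an assumption about the public API, while the option names come from this diff:

    // Before (11.0.58): thoughts arrived by default and were stripped on request.
    // const res = await gen.forward(ai, values, { hideThought: true });

    // After (11.0.59): thoughts are requested up front (gen, ai, values elided).
    const res = await gen.forward(ai, values, {
      thinkingTokenBudget: 'low',
      showThoughts: true, // surfaces the model's reasoning in result.thought
    });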
package/index.js CHANGED
@@ -1036,6 +1036,9 @@ var AxBaseAI = class {
         `Model ${model} does not support thinkingTokenBudget.`
       );
     }
+    if (options?.showThoughts && !this.getFeatures(model).hasShowThoughts) {
+      throw new Error(`Model ${model} does not support showThoughts.`);
+    }
     modelConfig.stream = (options?.stream !== void 0 ? options.stream : modelConfig.stream) ?? true;
     const canStream = this.getFeatures(model).streaming;
     if (!canStream) {
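
The new option is feature-gated: asking for showThoughts on a model whose feature set lacks hasShowThoughts now throws before any request is made. A defensive sketch; hasShowThoughts is the flag checked in the hunk above, and calling getFeatures from user code is an assumption:

    // Only opt in when the selected model advertises support (setup elided).
    const showThoughts = ai.getFeatures(model).hasShowThoughts ? true : undefined;
    const res = await gen.forward(ai, values, { showThoughts });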
@@ -1144,13 +1147,6 @@ var AxBaseAI = class {
     const wrappedRespFn = (state) => (resp) => {
       const res2 = respFn(resp, state);
       res2.sessionId = options?.sessionId;
-      if (options?.hideThought) {
-        res2.results.forEach((result) => {
-          if (result.thought) {
-            result.thought = void 0;
-          }
-        });
-      }
       if (!res2.modelUsage) {
         res2.modelUsage = {
           ai: this.name,
@@ -1189,13 +1185,6 @@ var AxBaseAI = class {
     }
     const res = this.aiImpl.createChatResp(rv);
     res.sessionId = options?.sessionId;
-    if (options?.hideThought) {
-      res.results.forEach((result) => {
-        if (result.thought) {
-          result.thought = void 0;
-        }
-      });
-    }
     if (!res.modelUsage) {
       const tokenUsage = this.aiImpl.getTokenUsage();
       if (tokenUsage) {
@@ -1416,7 +1405,14 @@ function setChatResponseEvents(res, span, excludeContentFromTrace) {
   if (!res.results) {
     return;
   }
-  for (const [index, result] of res.results.entries()) {
+  for (let index = 0; index < res.results.length; index++) {
+    const result = res.results[index];
+    if (!result) {
+      continue;
+    }
+    if (!result.content && !result.thought && !result.functionCalls?.length && !result.finishReason) {
+      continue;
+    }
     const toolCalls = result.functionCalls?.map((call) => {
       return {
         id: call.id,
@@ -3331,6 +3327,9 @@ var AxAIGoogleGeminiImpl = class {
         break;
       }
     }
+    if (config.showThoughts !== void 0) {
+      thinkingConfig.includeThoughts = config.showThoughts;
+    }
     const generationConfig = {
       maxOutputTokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
       temperature: req.modelConfig?.temperature ?? this.config.temperature,
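
On Google Gemini the flag maps onto thinkingConfig.includeThoughts, and leaving showThoughts unset preserves the provider's default. A sketch of the resulting generationConfig for showThoughts: true; includeThoughts is the field set in the hunk above, the other values are illustrative:

    const generationConfig = {
      maxOutputTokens: 8192, // illustrative; taken from modelConfig in the real code
      thinkingConfig: { includeThoughts: true },
    };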
@@ -4119,7 +4118,7 @@ var AxAIOpenAIResponsesImpl = class {
     }
     return items;
   }
-  createChatReq(req, _config) {
+  createChatReq(req, config) {
     const model = req.model;
     const apiConfig = { name: "/responses" };
     let instructionsFromPrompt = null;
@@ -4142,6 +4141,10 @@ var AxAIOpenAIResponsesImpl = class {
         parameters: v.parameters ?? {}
       })
     );
+    const includeFields = [];
+    if (config.showThoughts) {
+      includeFields.push("reasoning.encrypted_content");
+    }
     let mutableReq = {
       model,
       input: "",
@@ -4156,7 +4159,7 @@ var AxAIOpenAIResponsesImpl = class {
       // Sourced from modelConfig or global config
       // Optional fields from AxAIOpenAIResponsesRequest that need to be in Mutable for initialization
       background: void 0,
-      include: void 0,
+      include: includeFields.length > 0 ? includeFields : void 0,
       metadata: void 0,
       parallel_tool_calls: this.config.parallelToolCalls,
       previous_response_id: void 0,
@@ -4229,9 +4232,13 @@ var AxAIOpenAIResponsesImpl = class {
         break;
       case "reasoning":
         currentResult.id = item.id;
-        currentResult.thought = item.summary.map(
-          (s) => typeof s === "object" ? JSON.stringify(s) : s
-        ).join("\n");
+        if (item.encrypted_content) {
+          currentResult.thought = item.encrypted_content;
+        } else {
+          currentResult.thought = item.summary.map(
+            (s) => typeof s === "object" ? JSON.stringify(s) : s
+          ).join("\n");
+        }
         break;
       case "file_search_call":
         currentResult.id = item.id;
@@ -4549,7 +4556,9 @@ var AxAIOpenAIResponsesImpl = class {
       {
         const reasoningItem = event.item;
         baseResult.id = event.item.id;
-        if (reasoningItem.summary) {
+        if (reasoningItem.encrypted_content) {
+          baseResult.thought = reasoningItem.encrypted_content;
+        } else if (reasoningItem.summary) {
           baseResult.thought = reasoningItem.summary.map(
             (s) => typeof s === "object" ? JSON.stringify(s) : s
           ).join("\n");
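
For the OpenAI Responses API the flag has two effects, both visible above: the outgoing request asks for encrypted reasoning via the include field, and a returned reasoning item's encrypted_content is preferred over its summary when populating result.thought, in the blocking and streaming paths alike. A sketch of the request fragment; the field names come from these hunks, the model name is illustrative:

    const requestFragment = {
      model: 'o4-mini', // illustrative
      include: ['reasoning.encrypted_content'],
    };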
@@ -8000,7 +8009,8 @@ var AxGen = class extends AxProgramWithSignature {
       stream,
       functions: _functions,
       functionCall: _functionCall,
-      thinkingTokenBudget
+      thinkingTokenBudget,
+      showThoughts
     } = options ?? {};
     const chatPrompt = mem?.history(sessionId) ?? [];
     if (chatPrompt.length === 0) {
@@ -8026,6 +8036,7 @@ var AxGen = class extends AxProgramWithSignature {
         stream,
         debug: false,
         thinkingTokenBudget,
+        showThoughts,
         traceContext,
         abortSignal: options?.abortSignal
       }
@@ -8440,6 +8451,7 @@ Content: ${result.content}`
       ...funcNames ? { provided_functions: funcNames } : {},
       ...options?.model ? { model: options.model } : {},
       ...options?.thinkingTokenBudget ? { thinking_token_budget: options.thinkingTokenBudget } : {},
+      ...options?.showThoughts ? { show_thoughts: options.showThoughts } : {},
       ...options?.maxSteps ? { max_steps: options.maxSteps } : {},
       ...options?.maxRetries ? { max_retries: options.maxRetries } : {},
       ...options?.fastFail ? { fast_fail: options.fastFail } : {}
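
The option is also recorded on the generation telemetry alongside the other forward options. A sketch of what the spread chain above yields for a call with showThoughts: true and maxSteps: 3; the attribute keys come from the hunk, the values are illustrative:

    const attributes = {
      show_thoughts: true,
      max_steps: 3,
    };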