@oh-my-pi/pi-ai 14.7.5 → 14.7.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,15 @@
2
2
 
3
3
  ## [Unreleased]
4
4
 
5
+ ## [14.7.6] - 2026-05-07
6
+
7
+ ### Added
8
+
9
+ - Added `hideThinkingSummary` option to `SimpleStreamOptions`. When true, `streamSimple` requests that the underlying provider omit reasoning/thinking summaries: Anthropic receives `thinking.display = "omitted"` (where supported), and OpenAI Responses / Azure / Codex providers leave `reasoning.summary` unset so the server skips emitting the human-readable summary stream entirely.
10
+
11
+ ### Changed
12
+
13
+ - Changed OpenAI Responses, Azure OpenAI Responses, and OpenAI Codex providers to omit `reasoning.summary` from requests when `reasoningSummary` is explicitly `null` (previously the Responses/Azure providers fell back to `"auto"`, and the Codex provider fell back to `"detailed"`).
5
14
  ## [14.7.5] - 2026-05-07
6
15
 
7
16
  ### Added
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "type": "module",
3
3
  "name": "@oh-my-pi/pi-ai",
4
- "version": "14.7.5",
4
+ "version": "14.7.6",
5
5
  "description": "Unified LLM API with automatic model discovery and provider configuration",
6
6
  "homepage": "https://github.com/can1357/oh-my-pi",
7
7
  "author": "Can Boluk",
@@ -46,8 +46,8 @@
46
46
  "@aws-sdk/credential-provider-node": "^3.972.39",
47
47
  "@bufbuild/protobuf": "^2.12.0",
48
48
  "@google/genai": "^1.52.0",
49
- "@oh-my-pi/pi-natives": "14.7.5",
50
- "@oh-my-pi/pi-utils": "14.7.5",
49
+ "@oh-my-pi/pi-natives": "14.7.6",
50
+ "@oh-my-pi/pi-utils": "14.7.6",
51
51
  "@sinclair/typebox": "^0.34.49",
52
52
  "@smithy/node-http-handler": "^4.6.1",
53
53
  "ajv": "^8.20.0",
@@ -317,11 +317,14 @@ function buildParams(
317
317
  // See: https://github.com/can1357/oh-my-pi/issues/41
318
318
  params.include = ["reasoning.encrypted_content"];
319
319
 
320
- if (options?.reasoning || options?.reasoningSummary) {
321
- params.reasoning = {
320
+ if (options?.reasoning || options?.reasoningSummary !== undefined) {
321
+ const reasoningParams: NonNullable<typeof params.reasoning> = {
322
322
  effort: options?.reasoning || "medium",
323
- summary: options?.reasoningSummary || "auto",
324
323
  };
324
+ if (options?.reasoningSummary !== null) {
325
+ reasoningParams.summary = options?.reasoningSummary || "auto";
326
+ }
327
+ params.reasoning = reasoningParams;
325
328
  } else {
326
329
  if (model.name.toLowerCase().startsWith("gpt-5")) {
327
330
  // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7
@@ -4,7 +4,7 @@ import type { Api, Model } from "../../types";
4
4
 
5
5
  export interface ReasoningConfig {
6
6
  effort: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
7
- summary: "auto" | "concise" | "detailed" | null;
7
+ summary?: "auto" | "concise" | "detailed";
8
8
  }
9
9
 
10
10
  export interface CodexRequestOptions {
@@ -52,11 +52,14 @@ export interface RequestBody {
52
52
  }
53
53
 
54
54
  function getReasoningConfig(model: Model<Api>, options: CodexRequestOptions): ReasoningConfig {
55
- return {
55
+ const config: ReasoningConfig = {
56
56
  effort:
57
57
  options.reasoningEffort === "none" ? "none" : requireSupportedEffort(model, options.reasoningEffort as Effort),
58
- summary: options.reasoningSummary ?? "detailed",
59
58
  };
59
+ if (options.reasoningSummary !== null) {
60
+ config.summary = options.reasoningSummary ?? "detailed";
61
+ }
62
+ return config;
60
63
  }
61
64
 
62
65
  function filterInput(input: InputItem[] | undefined): InputItem[] | undefined {
@@ -430,13 +430,16 @@ function buildParams(
430
430
  // See: https://github.com/can1357/oh-my-pi/issues/41
431
431
  params.include = ["reasoning.encrypted_content"];
432
432
 
433
- if (options?.reasoning || options?.reasoningSummary) {
434
- params.reasoning = {
433
+ if (options?.reasoning || options?.reasoningSummary !== undefined) {
434
+ const reasoningParams: NonNullable<typeof params.reasoning> = {
435
435
  effort: mapReasoningEffort(options?.reasoning || "medium", model.compat?.reasoningEffortMap) as NonNullable<
436
436
  OpenAIResponsesSamplingParams["reasoning"]
437
437
  >["effort"],
438
- summary: options?.reasoningSummary || "auto",
439
438
  };
439
+ if (options?.reasoningSummary !== null) {
440
+ reasoningParams.summary = options?.reasoningSummary || "auto";
441
+ }
442
+ params.reasoning = reasoningParams;
440
443
  } else if (model.name.startsWith("gpt-5")) {
441
444
  // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7
442
445
  messages.push({
package/src/stream.ts CHANGED
@@ -462,6 +462,7 @@ function mapOptionsForApi<TApi extends Api>(
462
462
  ...base,
463
463
  thinkingEnabled: false,
464
464
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
465
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
465
466
  });
466
467
  }
467
468
 
@@ -471,6 +472,7 @@ function mapOptionsForApi<TApi extends Api>(
471
472
  ...base,
472
473
  thinkingEnabled: false,
473
474
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
475
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
474
476
  });
475
477
  }
476
478
 
@@ -483,6 +485,7 @@ function mapOptionsForApi<TApi extends Api>(
483
485
  thinkingEnabled: true,
484
486
  effort,
485
487
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
488
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
486
489
  });
487
490
  }
488
491
 
@@ -492,6 +495,7 @@ function mapOptionsForApi<TApi extends Api>(
492
495
  thinkingEnabled: true,
493
496
  thinkingBudgetTokens: thinkingBudget,
494
497
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
498
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
495
499
  });
496
500
  }
497
501
 
@@ -509,6 +513,7 @@ function mapOptionsForApi<TApi extends Api>(
509
513
  ...base,
510
514
  thinkingEnabled: false,
511
515
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
516
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
512
517
  });
513
518
  } else {
514
519
  return castApi<"anthropic-messages">({
@@ -517,6 +522,7 @@ function mapOptionsForApi<TApi extends Api>(
517
522
  thinkingEnabled: true,
518
523
  thinkingBudgetTokens: thinkingBudget,
519
524
  toolChoice: mapAnthropicToolChoice(options?.toolChoice),
525
+ thinkingDisplay: options?.hideThinkingSummary ? "omitted" : undefined,
520
526
  });
521
527
  }
522
528
  }
@@ -564,6 +570,7 @@ function mapOptionsForApi<TApi extends Api>(
564
570
  reasoning: resolveOpenAiReasoningEffort(model, options),
565
571
  toolChoice: mapOpenAiToolChoice(options?.toolChoice),
566
572
  serviceTier: options?.serviceTier,
573
+ reasoningSummary: options?.hideThinkingSummary ? null : undefined,
567
574
  });
568
575
 
569
576
  case "azure-openai-responses":
@@ -572,6 +579,7 @@ function mapOptionsForApi<TApi extends Api>(
572
579
  reasoning: resolveOpenAiReasoningEffort(model, options),
573
580
  toolChoice: mapOpenAiToolChoice(options?.toolChoice),
574
581
  serviceTier: options?.serviceTier,
582
+ reasoningSummary: options?.hideThinkingSummary ? null : undefined,
575
583
  });
576
584
 
577
585
  case "openai-codex-responses":
@@ -581,6 +589,7 @@ function mapOptionsForApi<TApi extends Api>(
581
589
  toolChoice: mapOpenAiToolChoice(options?.toolChoice),
582
590
  serviceTier: options?.serviceTier,
583
591
  preferWebsockets: options?.preferWebsockets,
592
+ reasoningSummary: options?.hideThinkingSummary ? null : undefined,
584
593
  });
585
594
 
586
595
  case "google-generative-ai": {
package/src/types.ts CHANGED
@@ -255,6 +255,14 @@ export interface SimpleStreamOptions extends StreamOptions {
255
255
  * this way when `reasoning` is undefined.
256
256
  */
257
257
  disableReasoning?: boolean;
258
+ /**
259
+ * If true, request that the provider omit thinking/reasoning summaries
260
+ * from the response (e.g. Anthropic `thinking.display = "omitted"`,
261
+ * OpenAI Responses `reasoning.summary` left unset). The model still
262
+ * reasons internally; only the human-readable summary stream is dropped.
263
+ * Useful when the UI hides thinking blocks anyway and the summary is wasted bandwidth.
264
+ */
265
+ hideThinkingSummary?: boolean;
258
266
  /** Custom token budgets for thinking levels (token-based providers only) */
259
267
  thinkingBudgets?: ThinkingBudgets;
260
268
  /** Cursor exec handlers for local tool execution */