klaude-code 2.8.1__py3-none-any.whl → 2.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. klaude_code/app/runtime.py +2 -1
  2. klaude_code/auth/antigravity/oauth.py +33 -38
  3. klaude_code/auth/antigravity/token_manager.py +0 -18
  4. klaude_code/auth/base.py +53 -0
  5. klaude_code/auth/claude/oauth.py +34 -49
  6. klaude_code/auth/codex/exceptions.py +0 -4
  7. klaude_code/auth/codex/oauth.py +32 -28
  8. klaude_code/auth/codex/token_manager.py +0 -18
  9. klaude_code/cli/cost_cmd.py +128 -39
  10. klaude_code/cli/list_model.py +27 -10
  11. klaude_code/cli/main.py +14 -3
  12. klaude_code/config/assets/builtin_config.yaml +25 -24
  13. klaude_code/config/config.py +47 -25
  14. klaude_code/config/sub_agent_model_helper.py +18 -13
  15. klaude_code/config/thinking.py +0 -8
  16. klaude_code/const.py +1 -1
  17. klaude_code/core/agent_profile.py +11 -56
  18. klaude_code/core/compaction/overflow.py +0 -4
  19. klaude_code/core/executor.py +33 -5
  20. klaude_code/core/manager/llm_clients.py +9 -1
  21. klaude_code/core/prompts/prompt-claude-code.md +4 -4
  22. klaude_code/core/reminders.py +21 -23
  23. klaude_code/core/task.py +1 -5
  24. klaude_code/core/tool/__init__.py +3 -2
  25. klaude_code/core/tool/file/apply_patch.py +0 -27
  26. klaude_code/core/tool/file/read_tool.md +3 -2
  27. klaude_code/core/tool/file/read_tool.py +27 -3
  28. klaude_code/core/tool/offload.py +0 -35
  29. klaude_code/core/tool/shell/bash_tool.py +1 -1
  30. klaude_code/core/tool/sub_agent/__init__.py +6 -0
  31. klaude_code/core/tool/sub_agent/image_gen.md +16 -0
  32. klaude_code/core/tool/sub_agent/image_gen.py +146 -0
  33. klaude_code/core/tool/sub_agent/task.md +20 -0
  34. klaude_code/core/tool/sub_agent/task.py +205 -0
  35. klaude_code/core/tool/tool_registry.py +0 -16
  36. klaude_code/core/turn.py +1 -1
  37. klaude_code/llm/anthropic/input.py +6 -5
  38. klaude_code/llm/antigravity/input.py +14 -7
  39. klaude_code/llm/bedrock_anthropic/__init__.py +3 -0
  40. klaude_code/llm/google/client.py +8 -6
  41. klaude_code/llm/google/input.py +20 -12
  42. klaude_code/llm/image.py +18 -11
  43. klaude_code/llm/input_common.py +32 -6
  44. klaude_code/llm/json_stable.py +37 -0
  45. klaude_code/llm/{codex → openai_codex}/__init__.py +1 -1
  46. klaude_code/llm/{codex → openai_codex}/client.py +24 -2
  47. klaude_code/llm/openai_codex/prompt_sync.py +237 -0
  48. klaude_code/llm/openai_compatible/client.py +3 -1
  49. klaude_code/llm/openai_compatible/input.py +0 -10
  50. klaude_code/llm/openai_compatible/stream.py +35 -10
  51. klaude_code/llm/{responses → openai_responses}/client.py +1 -1
  52. klaude_code/llm/{responses → openai_responses}/input.py +15 -5
  53. klaude_code/llm/registry.py +3 -8
  54. klaude_code/llm/stream_parts.py +3 -1
  55. klaude_code/llm/usage.py +1 -9
  56. klaude_code/protocol/events.py +2 -2
  57. klaude_code/protocol/message.py +3 -2
  58. klaude_code/protocol/model.py +34 -2
  59. klaude_code/protocol/op.py +13 -0
  60. klaude_code/protocol/op_handler.py +5 -0
  61. klaude_code/protocol/sub_agent/AGENTS.md +5 -5
  62. klaude_code/protocol/sub_agent/__init__.py +13 -34
  63. klaude_code/protocol/sub_agent/explore.py +7 -34
  64. klaude_code/protocol/sub_agent/image_gen.py +3 -74
  65. klaude_code/protocol/sub_agent/task.py +3 -47
  66. klaude_code/protocol/sub_agent/web.py +8 -52
  67. klaude_code/protocol/tools.py +2 -0
  68. klaude_code/session/session.py +80 -22
  69. klaude_code/session/store.py +0 -4
  70. klaude_code/skill/assets/deslop/SKILL.md +9 -0
  71. klaude_code/skill/system_skills.py +0 -20
  72. klaude_code/tui/command/fork_session_cmd.py +5 -2
  73. klaude_code/tui/command/resume_cmd.py +9 -2
  74. klaude_code/tui/command/sub_agent_model_cmd.py +85 -18
  75. klaude_code/tui/components/assistant.py +0 -26
  76. klaude_code/tui/components/bash_syntax.py +4 -0
  77. klaude_code/tui/components/command_output.py +3 -1
  78. klaude_code/tui/components/developer.py +3 -0
  79. klaude_code/tui/components/diffs.py +4 -209
  80. klaude_code/tui/components/errors.py +4 -0
  81. klaude_code/tui/components/mermaid_viewer.py +2 -2
  82. klaude_code/tui/components/metadata.py +0 -3
  83. klaude_code/tui/components/rich/markdown.py +120 -87
  84. klaude_code/tui/components/rich/status.py +2 -2
  85. klaude_code/tui/components/rich/theme.py +11 -6
  86. klaude_code/tui/components/sub_agent.py +2 -46
  87. klaude_code/tui/components/thinking.py +0 -33
  88. klaude_code/tui/components/tools.py +65 -21
  89. klaude_code/tui/components/user_input.py +2 -0
  90. klaude_code/tui/input/images.py +21 -18
  91. klaude_code/tui/input/key_bindings.py +2 -2
  92. klaude_code/tui/input/prompt_toolkit.py +49 -49
  93. klaude_code/tui/machine.py +29 -47
  94. klaude_code/tui/renderer.py +48 -33
  95. klaude_code/tui/runner.py +2 -1
  96. klaude_code/tui/terminal/image.py +27 -34
  97. klaude_code/ui/common.py +0 -70
  98. {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/METADATA +3 -6
  99. {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/RECORD +103 -99
  100. klaude_code/core/tool/sub_agent_tool.py +0 -126
  101. klaude_code/llm/bedrock/__init__.py +0 -3
  102. klaude_code/llm/openai_compatible/tool_call_accumulator.py +0 -108
  103. klaude_code/tui/components/rich/searchable_text.py +0 -68
  104. /klaude_code/llm/{bedrock → bedrock_anthropic}/client.py +0 -0
  105. /klaude_code/llm/{responses → openai_responses}/__init__.py +0 -0
  106. {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/WHEEL +0 -0
  107. {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/entry_points.txt +0 -0
@@ -234,10 +234,15 @@ def _get_model_params_display(model: ModelConfig) -> list[Text]:
234
234
  return [Text("")]
235
235
 
236
236
 
237
- def _build_provider_info_panel(provider: ProviderConfig, available: bool) -> Quote:
237
+ def _build_provider_info_panel(provider: ProviderConfig, available: bool, *, disabled: bool) -> Quote:
238
238
  """Build a Quote containing provider name and information using a two-column grid."""
239
239
  # Provider name as title
240
- if available:
240
+ if disabled:
241
+ title = Text.assemble(
242
+ (provider.provider_name, ThemeKey.CONFIG_PROVIDER),
243
+ (" (Disabled)", "dim"),
244
+ )
245
+ elif available:
241
246
  title = Text(provider.provider_name, style=ThemeKey.CONFIG_PROVIDER)
242
247
  else:
243
248
  title = Text.assemble(
@@ -297,7 +302,8 @@ def _build_models_table(
297
302
  config: Config,
298
303
  ) -> Table:
299
304
  """Build a table for models under a provider."""
300
- provider_available = not provider.is_api_key_missing()
305
+ provider_disabled = provider.disabled
306
+ provider_available = (not provider_disabled) and (not provider.is_api_key_missing())
301
307
 
302
308
  def _resolve_selector(value: str | None) -> str | None:
303
309
  if not value:
@@ -334,7 +340,15 @@ def _build_models_table(
334
340
  is_last = i == model_count - 1
335
341
  prefix = " └─ " if is_last else " ├─ "
336
342
 
337
- if not provider_available:
343
+ if provider_disabled:
344
+ name = Text.assemble(
345
+ (prefix, ThemeKey.LINES),
346
+ (model.model_name, "dim strike"),
347
+ (" (provider disabled)", "dim"),
348
+ )
349
+ model_id = Text(model.model_id or "", style="dim")
350
+ params = Text("(disabled)", style="dim")
351
+ elif not provider_available:
338
352
  name = Text.assemble((prefix, ThemeKey.LINES), (model.model_name, "dim"))
339
353
  model_id = Text(model.model_id or "", style="dim")
340
354
  params = Text("(unavailable)", style="dim")
@@ -408,19 +422,22 @@ def display_models_and_providers(config: Config, *, show_all: bool = False):
408
422
  _display_agent_models_table(config, console)
409
423
  console.print()
410
424
 
411
- # Sort providers: available (api_key set) first, unavailable (api_key not set) last
412
- sorted_providers = sorted(config.provider_list, key=lambda p: (p.is_api_key_missing(), p.provider_name))
425
+ # Sort providers: enabled+available first, disabled/unavailable last
426
+ sorted_providers = sorted(
427
+ config.provider_list,
428
+ key=lambda p: (p.disabled, p.is_api_key_missing(), p.provider_name),
429
+ )
413
430
 
414
- # Filter out unavailable providers unless show_all is True
431
+ # Filter out disabled/unavailable providers unless show_all is True
415
432
  if not show_all:
416
- sorted_providers = [p for p in sorted_providers if not p.is_api_key_missing()]
433
+ sorted_providers = [p for p in sorted_providers if (not p.disabled) and (not p.is_api_key_missing())]
417
434
 
418
435
  # Display each provider with its models table
419
436
  for provider in sorted_providers:
420
- provider_available = not provider.is_api_key_missing()
437
+ provider_available = (not provider.disabled) and (not provider.is_api_key_missing())
421
438
 
422
439
  # Provider info panel
423
- provider_panel = _build_provider_info_panel(provider, provider_available)
440
+ provider_panel = _build_provider_info_panel(provider, provider_available, disabled=provider.disabled)
424
441
  console.print(provider_panel)
425
442
  console.print()
426
443
 
klaude_code/cli/main.py CHANGED
@@ -227,10 +227,21 @@ def main_callback(
227
227
  log(("Error: --resume <id> cannot be combined with --continue or interactive --resume", "red"))
228
228
  raise typer.Exit(2)
229
229
 
230
+ # Resolve resume_by_id with prefix matching support
230
231
  if resume_by_id_value is not None and not Session.exists(resume_by_id_value):
231
- log((f"Error: session id '{resume_by_id_value}' not found for this project", "red"))
232
- log(("Hint: run `klaude --resume` to select an existing session", "yellow"))
233
- raise typer.Exit(2)
232
+ matches = Session.find_sessions_by_prefix(resume_by_id_value)
233
+ if not matches:
234
+ log((f"Error: session id '{resume_by_id_value}' not found for this project", "red"))
235
+ log(("Hint: run `klaude --resume` to select an existing session", "yellow"))
236
+ raise typer.Exit(2)
237
+ if len(matches) == 1:
238
+ resume_by_id_value = matches[0]
239
+ else:
240
+ # Multiple matches: show interactive selection with filtered list
241
+ selected = select_session_sync(session_ids=matches)
242
+ if selected is None:
243
+ raise typer.Exit(1)
244
+ resume_by_id_value = selected
234
245
 
235
246
  if not sys.stdin.isatty() or not sys.stdout.isatty():
236
247
  log(("Error: interactive mode requires a TTY", "red"))
@@ -36,7 +36,7 @@ provider_list:
36
36
  verbosity: high
37
37
  thinking:
38
38
  reasoning_effort: high
39
- reasoning_summary: detailed
39
+ reasoning_summary: concise
40
40
  cost: {input: 1.75, output: 14, cache_read: 0.17}
41
41
 
42
42
  - model_name: gpt-5.2-medium
@@ -71,7 +71,7 @@ provider_list:
71
71
  context_limit: 400000
72
72
  thinking:
73
73
  reasoning_effort: medium
74
- reasoning_summary: detailed
74
+ reasoning_summary: concise
75
75
  cost: {input: 1.25, output: 10, cache_read: 0.13}
76
76
 
77
77
 
@@ -87,7 +87,7 @@ provider_list:
87
87
  verbosity: high
88
88
  thinking:
89
89
  reasoning_effort: high
90
- reasoning_summary: detailed
90
+ reasoning_summary: concise
91
91
  cost: {input: 1.75, output: 14, cache_read: 0.17}
92
92
 
93
93
  - model_name: gpt-5.2-medium
@@ -148,6 +148,8 @@ provider_list:
148
148
  modalities:
149
149
  - image
150
150
  - text
151
+ image_config:
152
+ image_size: "4K"
151
153
  cost: {input: 2, output: 12, cache_read: 0.2, image: 120}
152
154
 
153
155
  - model_name: nano-banana
@@ -221,6 +223,8 @@ provider_list:
221
223
  modalities:
222
224
  - image
223
225
  - text
226
+ image_config:
227
+ image_size: "4K"
224
228
  cost: {input: 2, output: 12, cache_read: 0.2, image: 120}
225
229
 
226
230
  - model_name: nano-banana
@@ -275,8 +279,22 @@ provider_list:
275
279
  cost: {input: 4, output: 16, cache_read: 1, currency: CNY}
276
280
 
277
281
 
282
+ - provider_name: cerebras
283
+ protocol: openai
284
+ api_key: ${CEREBRAS_API_KEY}
285
+ base_url: https://api.cerebras.ai/v1
286
+ model_list:
287
+
288
+ - model_name: glm
289
+ model_id: zai-glm-4.7
290
+ context_limit: 131072
291
+ max_tokens: 12800
292
+ cost: {input: 2.25, output: 2.75}
293
+
294
+
278
295
  - provider_name: claude-max
279
296
  protocol: claude_oauth
297
+ disabled: true
280
298
  model_list:
281
299
 
282
300
  - model_name: sonnet
@@ -318,7 +336,7 @@ provider_list:
318
336
  verbosity: high
319
337
  thinking:
320
338
  reasoning_effort: high
321
- reasoning_summary: detailed
339
+ reasoning_summary: concise
322
340
  cost: {input: 1.75, output: 14, cache_read: 0.17}
323
341
 
324
342
 
@@ -329,42 +347,25 @@ provider_list:
329
347
  model_id: claude-opus-4-5-thinking
330
348
  context_limit: 200000
331
349
  max_tokens: 64000
332
- thinking:
333
- type: enabled
334
- budget_tokens: 10240
350
+
335
351
  - model_name: sonnet
336
352
  model_id: claude-sonnet-4-5
337
353
  context_limit: 200000
338
354
  max_tokens: 64000
339
- - model_name: sonnet-thinking
340
- model_id: claude-sonnet-4-5-thinking
341
- context_limit: 200000
342
- max_tokens: 64000
343
- thinking:
344
- type: enabled
345
- budget_tokens: 10240
355
+
346
356
  - model_name: gemini-pro-high
347
357
  model_id: gemini-3-pro-high
348
358
  context_limit: 1048576
349
359
  max_tokens: 65535
350
360
  thinking:
351
361
  reasoning_effort: high
352
- - model_name: gemini-pro-low
353
- model_id: gemini-3-pro-low
354
- context_limit: 1048576
355
- max_tokens: 65535
356
- thinking:
357
- reasoning_effort: low
362
+
358
363
  - model_name: gemini-flash
359
364
  model_id: gemini-3-flash
360
365
  context_limit: 1048576
361
366
  max_tokens: 65535
362
367
  thinking:
363
368
  reasoning_effort: medium
364
- - model_name: gpt-oss
365
- model_id: gpt-oss-120b-medium
366
- context_limit: 131072
367
- max_tokens: 32768
368
369
 
369
370
 
370
371
  compact_model: gemini-flash
@@ -44,13 +44,6 @@ def parse_env_var_syntax(value: str | None) -> tuple[str | None, str | None]:
44
44
  return None, value
45
45
 
46
46
 
47
- def is_env_var_syntax(value: str | None) -> bool:
48
- """Check if a value uses ${ENV_VAR} syntax."""
49
- if value is None:
50
- return False
51
- return _ENV_VAR_PATTERN.match(value) is not None
52
-
53
-
54
47
  def resolve_api_key(value: str | None) -> str | None:
55
48
  """Resolve an API key value, expanding ${ENV_VAR} syntax if present."""
56
49
  _, resolved = parse_env_var_syntax(value)
@@ -70,6 +63,7 @@ class ModelConfig(llm_param.LLMConfigModelParameter):
70
63
  class ProviderConfig(llm_param.LLMConfigProviderParameter):
71
64
  """Full provider configuration (used in merged config)."""
72
65
 
66
+ disabled: bool = False
73
67
  model_list: list[ModelConfig] = Field(default_factory=lambda: [])
74
68
 
75
69
  def get_resolved_api_key(self) -> str | None:
@@ -141,6 +135,7 @@ class UserProviderConfig(BaseModel):
141
135
 
142
136
  provider_name: str
143
137
  protocol: llm_param.LLMClientProtocol | None = None
138
+ disabled: bool = False
144
139
  base_url: str | None = None
145
140
  api_key: str | None = None
146
141
  is_azure: bool = False
@@ -290,6 +285,9 @@ class Config(BaseModel):
290
285
  if requested_provider is not None and provider.provider_name.casefold() != requested_provider.casefold():
291
286
  continue
292
287
 
288
+ if provider.disabled:
289
+ continue
290
+
293
291
  api_key = provider.get_resolved_api_key()
294
292
  if (
295
293
  provider.protocol
@@ -303,7 +301,11 @@ class Config(BaseModel):
303
301
  ):
304
302
  continue
305
303
 
306
- if any(m.model_name == requested_model for m in provider.model_list):
304
+ for model in provider.model_list:
305
+ if model.model_name != requested_model:
306
+ continue
307
+ if model.disabled:
308
+ continue
307
309
  return requested_model, provider.provider_name
308
310
 
309
311
  return None
@@ -315,6 +317,11 @@ class Config(BaseModel):
315
317
  if requested_provider is not None and provider.provider_name.casefold() != requested_provider.casefold():
316
318
  continue
317
319
 
320
+ if provider.disabled:
321
+ if requested_provider is not None:
322
+ raise ValueError(f"Provider '{provider.provider_name}' is disabled for: {model_name}")
323
+ continue
324
+
318
325
  # Resolve ${ENV_VAR} syntax for api_key
319
326
  api_key = provider.get_resolved_api_key()
320
327
 
@@ -339,7 +346,15 @@ class Config(BaseModel):
339
346
  for model in provider.model_list:
340
347
  if model.model_name != requested_model:
341
348
  continue
342
- provider_dump = provider.model_dump(exclude={"model_list"})
349
+
350
+ if model.disabled:
351
+ if requested_provider is not None:
352
+ raise ValueError(
353
+ f"Model '{requested_model}' is disabled in provider '{provider.provider_name}' for: {model_name}"
354
+ )
355
+ break
356
+
357
+ provider_dump = provider.model_dump(exclude={"model_list", "disabled"})
343
358
  provider_dump["api_key"] = api_key
344
359
  return llm_param.LLMConfigParameter(
345
360
  **provider_dump,
@@ -353,7 +368,7 @@ class Config(BaseModel):
353
368
 
354
369
  Args:
355
370
  only_available: If True, only return models from providers with valid API keys.
356
- include_disabled: If False, exclude models with disabled=True.
371
+ include_disabled: If False, exclude models/providers with disabled=True.
357
372
  """
358
373
  return [
359
374
  ModelEntry(
@@ -362,7 +377,8 @@ class Config(BaseModel):
362
377
  **model.model_dump(exclude={"model_name"}),
363
378
  )
364
379
  for provider in self.provider_list
365
- if not only_available or not provider.is_api_key_missing()
380
+ if include_disabled or not provider.disabled
381
+ if not only_available or (not provider.disabled and not provider.is_api_key_missing())
366
382
  for model in provider.model_list
367
383
  if include_disabled or not model.disabled
368
384
  ]
@@ -374,13 +390,6 @@ class Config(BaseModel):
374
390
  return True
375
391
  return False
376
392
 
377
- def get_first_available_nano_banana_model(self) -> str | None:
378
- """Get the first available nano-banana model, or None."""
379
- for entry in self.iter_model_entries(only_available=True, include_disabled=False):
380
- if "nano-banana" in entry.model_name:
381
- return entry.model_name
382
- return None
383
-
384
393
  def get_first_available_image_model(self) -> str | None:
385
394
  """Get the first available image generation model, or None."""
386
395
  for entry in self.iter_model_entries(only_available=True, include_disabled=False):
@@ -406,7 +415,21 @@ class Config(BaseModel):
406
415
  user_config.theme = self.theme
407
416
  # Note: provider_list is NOT synced - user providers are already in user_config
408
417
 
409
- config_dict = user_config.model_dump(mode="json", exclude_none=True, exclude_defaults=True)
418
+ # Keep the saved file compact (exclude defaults), but preserve explicit
419
+ # overrides inside provider_list (e.g. `disabled: false` to re-enable a
420
+ # builtin provider that is disabled by default).
421
+ config_dict = user_config.model_dump(
422
+ mode="json",
423
+ exclude_none=True,
424
+ exclude_defaults=True,
425
+ exclude={"provider_list"},
426
+ )
427
+
428
+ provider_list = [
429
+ p.model_dump(mode="json", exclude_none=True, exclude_unset=True) for p in (user_config.provider_list or [])
430
+ ]
431
+ if provider_list:
432
+ config_dict["provider_list"] = provider_list
410
433
 
411
434
  def _save_config() -> None:
412
435
  config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -454,12 +477,12 @@ def _get_builtin_config() -> Config:
454
477
  def _merge_model(builtin: ModelConfig, user: ModelConfig) -> ModelConfig:
455
478
  """Merge user model config with builtin model config.
456
479
 
457
- Strategy: user values take precedence if explicitly set (not default).
458
- This allows users to override specific fields (e.g., disabled=true)
480
+ Strategy: user values take precedence if explicitly set (not unset).
481
+ This allows users to override specific fields (e.g., disabled=true/false)
459
482
  without losing other builtin settings (e.g., model_id, max_tokens).
460
483
  """
461
484
  merged_data = builtin.model_dump()
462
- user_data = user.model_dump(exclude_defaults=True, exclude={"model_name"})
485
+ user_data = user.model_dump(exclude_unset=True, exclude={"model_name"})
463
486
  for key, value in user_data.items():
464
487
  if value is not None:
465
488
  merged_data[key] = value
@@ -485,10 +508,9 @@ def _merge_provider(builtin: ProviderConfig, user: UserProviderConfig) -> Provid
485
508
  # New model from user
486
509
  merged_models[m.model_name] = m
487
510
 
488
- # For other fields, use user values if explicitly set, otherwise use builtin
489
- # We check if user explicitly provided a value by comparing to defaults
511
+ # For other fields, use user values if explicitly set, otherwise use builtin.
490
512
  merged_data = builtin.model_dump()
491
- user_data = user.model_dump(exclude_defaults=True, exclude={"model_list"})
513
+ user_data = user.model_dump(exclude_unset=True, exclude={"model_list"})
492
514
 
493
515
  # Update with user's explicit settings
494
516
  for key, value in user_data.items():
@@ -5,13 +5,12 @@ from __future__ import annotations
5
5
  from dataclasses import dataclass
6
6
  from typing import TYPE_CHECKING
7
7
 
8
+ from klaude_code.protocol import tools
8
9
  from klaude_code.protocol.sub_agent import (
9
10
  AVAILABILITY_IMAGE_MODEL,
10
11
  SubAgentProfile,
11
12
  get_sub_agent_profile,
12
- get_sub_agent_profile_by_tool,
13
13
  iter_sub_agent_profiles,
14
- sub_agent_tool_names,
15
14
  )
16
15
  from klaude_code.protocol.tools import SubAgentType
17
16
 
@@ -30,7 +29,7 @@ class SubAgentModelInfo:
30
29
  # Effective model name used by this sub-agent.
31
30
  # - When configured_model is set: equals configured_model.
32
31
  # - When requirement-based default applies (e.g. ImageGen): resolved model.
33
- # - When inheriting from main agent: None.
32
+ # - When inheriting from defaults: resolved model name.
34
33
  effective_model: str | None
35
34
 
36
35
 
@@ -106,10 +105,11 @@ class SubAgentModelHelper:
106
105
  ) -> EmptySubAgentModelBehavior:
107
106
  """Describe what happens when a sub-agent model is not configured.
108
107
 
109
- Most sub-agents default to inheriting the main model.
108
+ Most sub-agents default to the Task model if configured, otherwise
109
+ they inherit the main model.
110
110
 
111
111
  Sub-agents with an availability requirement (e.g. ImageGen) do NOT
112
- inherit from the main model; instead they auto-resolve a suitable model
112
+ inherit from Task/main; instead they auto-resolve a suitable model
113
113
  (currently: the first available image model).
114
114
  """
115
115
 
@@ -117,9 +117,11 @@ class SubAgentModelHelper:
117
117
 
118
118
  requirement = profile.availability_requirement
119
119
  if requirement is None:
120
+ task_model = self._config.sub_agent_models.get(tools.TASK)
121
+ resolved = task_model or main_model_name
120
122
  return EmptySubAgentModelBehavior(
121
- description=f"inherit from main agent: {main_model_name}",
122
- resolved_model_name=main_model_name,
123
+ description=f"use default behavior: {resolved}",
124
+ resolved_model_name=resolved,
123
125
  )
124
126
 
125
127
  resolved = self.resolve_model_for_requirement(requirement)
@@ -154,13 +156,15 @@ class SubAgentModelHelper:
154
156
  For sub-agents without explicit config, resolves model based on availability_requirement.
155
157
  """
156
158
  result: list[SubAgentModelInfo] = []
157
- for profile in iter_sub_agent_profiles(enabled_only=True):
159
+ for profile in iter_sub_agent_profiles():
158
160
  if not self.check_availability_requirement(profile.availability_requirement):
159
161
  continue
160
162
  configured_model = self._config.sub_agent_models.get(profile.name)
161
163
  effective_model = configured_model
162
164
  if not effective_model and profile.availability_requirement:
163
165
  effective_model = self.resolve_model_for_requirement(profile.availability_requirement)
166
+ if not effective_model and profile.availability_requirement is None:
167
+ effective_model = self._config.sub_agent_models.get(tools.TASK) or self._config.main_model
164
168
  result.append(
165
169
  SubAgentModelInfo(
166
170
  profile=profile,
@@ -189,11 +193,9 @@ class SubAgentModelHelper:
189
193
 
190
194
  def get_enabled_sub_agent_tool_names(self) -> list[str]:
191
195
  """Return sub-agent tool names that should be added to main agent's tool list."""
192
- result: list[str] = []
193
- for name in sub_agent_tool_names(enabled_only=True):
194
- profile = get_sub_agent_profile_by_tool(name)
195
- if profile is not None and self.check_availability_requirement(profile.availability_requirement):
196
- result.append(name)
196
+ result: list[str] = [tools.TASK]
197
+ if self.check_availability_requirement(AVAILABILITY_IMAGE_MODEL):
198
+ result.append(tools.IMAGE_GEN)
197
199
  return result
198
200
 
199
201
  def build_sub_agent_client_configs(self) -> dict[SubAgentType, str]:
@@ -205,4 +207,7 @@ class SubAgentModelHelper:
205
207
  model_name = self.resolve_model_for_requirement(profile.availability_requirement)
206
208
  if model_name:
207
209
  result[profile.name] = model_name
210
+ task_model = self._config.sub_agent_models.get(tools.TASK)
211
+ if task_model:
212
+ result.setdefault(tools.TASK, task_model)
208
213
  return result
@@ -62,14 +62,6 @@ def _is_gemini_flash_model(model_name: str | None) -> bool:
62
62
  return "gemini-3-flash" in model_name.lower()
63
63
 
64
64
 
65
- def should_auto_trigger_thinking(model_name: str | None) -> bool:
66
- """Check if model should auto-trigger thinking selection on switch."""
67
- if not model_name:
68
- return False
69
- model_lower = model_name.lower()
70
- return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
71
-
72
-
73
65
  def get_levels_for_responses(model_name: str | None) -> list[str]:
74
66
  """Get thinking levels for responses protocol."""
75
67
  if _is_codex_max_model(model_name):
klaude_code/const.py CHANGED
@@ -141,7 +141,7 @@ MIN_HIDDEN_LINES_FOR_INDICATOR = 5 # Minimum hidden lines before showing trunca
141
141
  SUB_AGENT_RESULT_MAX_LINES = 10 # Maximum lines for sub-agent result display
142
142
  TRUNCATE_HEAD_MAX_LINES = 2 # Maximum lines for sub-agent error display
143
143
  BASH_OUTPUT_PANEL_THRESHOLD = 10 # Bash output line threshold for CodePanel display
144
- BASH_MULTILINE_STRING_TRUNCATE_MAX_LINES = 2 # Max lines shown for heredoc / multiline string tokens in bash tool calls
144
+ BASH_MULTILINE_STRING_TRUNCATE_MAX_LINES = 4 # Max lines shown for heredoc / multiline string tokens in bash tool calls
145
145
  URL_TRUNCATE_MAX_LENGTH = 400 # Maximum length for URL truncation in display
146
146
  QUERY_DISPLAY_TRUNCATE_LENGTH = 80 # Maximum length for search query display
147
147
  NOTIFY_COMPACT_LIMIT = 160 # Maximum length for notification body text
@@ -12,12 +12,10 @@ from typing import TYPE_CHECKING, Any, Protocol
12
12
  if TYPE_CHECKING:
13
13
  from klaude_code.config.config import Config
14
14
 
15
- from klaude_code.auth.codex.exceptions import CodexUnsupportedModelError
16
15
  from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
17
16
  from klaude_code.core.reminders import (
18
17
  at_file_reader_reminder,
19
18
  empty_todo_reminder,
20
- file_changed_externally_reminder,
21
19
  image_reminder,
22
20
  last_path_memory_reminder,
23
21
  memory_reminder,
@@ -28,7 +26,7 @@ from klaude_code.core.tool.report_back_tool import ReportBackTool
28
26
  from klaude_code.core.tool.tool_registry import get_tool_schemas
29
27
  from klaude_code.llm import LLMClientABC
30
28
  from klaude_code.protocol import llm_param, message, tools
31
- from klaude_code.protocol.sub_agent import get_sub_agent_profile
29
+ from klaude_code.protocol.sub_agent import AVAILABILITY_IMAGE_MODEL, get_sub_agent_profile
32
30
  from klaude_code.session import Session
33
31
 
34
32
  type Reminder = Callable[[Session], Awaitable[message.DeveloperMessage | None]]
@@ -54,12 +52,6 @@ COMMAND_DESCRIPTIONS: dict[str, str] = {
54
52
  }
55
53
 
56
54
 
57
- # Prompts for codex_oauth protocol - must be used exactly as-is without any additions.
58
- CODEX_OAUTH_PROMPTS: dict[str, str] = {
59
- "gpt-5.2-codex": "prompts/prompt-codex-gpt-5-2-codex.md",
60
- "gpt-5.2": "prompts/prompt-codex-gpt-5-2.md",
61
- }
62
-
63
55
  # Prompt for antigravity protocol - used exactly as-is without any additions.
64
56
  ANTIGRAVITY_PROMPT_PATH = "prompts/prompt-antigravity.md"
65
57
 
@@ -73,20 +65,6 @@ Only the content passed to `report_back` will be returned to user.\
73
65
  """
74
66
 
75
67
 
76
- SUB_AGENT_COMMON_PROMPT_FOR_MAIN_AGENT = """\
77
-
78
- # Sub-agent capabilities
79
- You have sub-agents (e.g. Task, Explore, WebAgent, ImageGen) with structured output and resume capabilites:
80
- - Agents can be provided with an `output_format` (JSON Schema) parameter for structured output
81
- - Example: `output_format={"type": "object", "properties": {"files": {"type": "array", "items": {"type": "string"}, "description": "List of file paths that match the search criteria, e.g. ['src/main.py', 'src/utils/helper.py']"}}, "required": ["files"]}`
82
- - Agents can be resumed using the `resume` parameter by passing the agent ID from a previous invocation. \
83
- When resumed, the agent continues with its full previous context preserved. \
84
- When NOT resuming, each invocation starts fresh and you should provide a detailed task description with all necessary context.
85
- - When the agent is done, it will return a single message back to you along with its agent ID. \
86
- You can use this ID to resume the agent later if needed for follow-up work.
87
- """
88
-
89
-
90
68
  @cache
91
69
  def _load_prompt_by_path(prompt_path: str) -> str:
92
70
  """Load and cache prompt content from a file path relative to core package."""
@@ -144,19 +122,6 @@ def _build_env_info(model_name: str) -> str:
144
122
  return "\n".join(env_lines)
145
123
 
146
124
 
147
- def _has_sub_agents(config: Config | None) -> bool:
148
- """Check if there are any sub-agent tools available for the main agent."""
149
- if config is not None:
150
- from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper
151
-
152
- helper = SubAgentModelHelper(config)
153
- return bool(helper.get_enabled_sub_agent_tool_names())
154
-
155
- from klaude_code.protocol.sub_agent import sub_agent_tool_names
156
-
157
- return bool(sub_agent_tool_names(enabled_only=True))
158
-
159
-
160
125
  def load_system_prompt(
161
126
  model_name: str,
162
127
  protocol: llm_param.LLMClientProtocol,
@@ -165,12 +130,11 @@ def load_system_prompt(
165
130
  ) -> str:
166
131
  """Get system prompt content for the given model and sub-agent type."""
167
132
 
168
- # For codex_oauth protocol, use exact prompts without any additions.
133
+ # For codex_oauth protocol, use dynamic prompts from GitHub (no additions).
169
134
  if protocol == llm_param.LLMClientProtocol.CODEX_OAUTH:
170
- for model_key, prompt_path in CODEX_OAUTH_PROMPTS.items():
171
- if model_key in model_name:
172
- return _load_prompt_by_path(prompt_path)
173
- raise CodexUnsupportedModelError(f"codex_oauth protocol does not support model: {model_name}")
135
+ from klaude_code.llm.openai_codex.prompt_sync import get_codex_instructions
136
+
137
+ return get_codex_instructions(model_name)
174
138
 
175
139
  # For antigravity protocol, use exact prompt without any additions.
176
140
  if protocol == llm_param.LLMClientProtocol.ANTIGRAVITY:
@@ -183,18 +147,13 @@ def load_system_prompt(
183
147
  base_prompt = _load_prompt_by_model(model_name)
184
148
 
185
149
  skills_prompt = ""
186
- sub_agent_prompt = ""
187
150
  if sub_agent_type is None:
188
151
  # Skills are progressive-disclosure: keep only metadata in the system prompt.
189
152
  from klaude_code.skill.manager import format_available_skills_for_system_prompt
190
153
 
191
154
  skills_prompt = format_available_skills_for_system_prompt()
192
155
 
193
- # Add sub-agent resume instructions if there are sub-agent tools available.
194
- if _has_sub_agents(config):
195
- sub_agent_prompt = "\n" + SUB_AGENT_COMMON_PROMPT_FOR_MAIN_AGENT
196
-
197
- return base_prompt + _build_env_info(model_name) + skills_prompt + sub_agent_prompt
156
+ return base_prompt + _build_env_info(model_name) + skills_prompt
198
157
 
199
158
 
200
159
  def load_agent_tools(
@@ -217,21 +176,18 @@ def load_agent_tools(
217
176
  # Main agent tools
218
177
  if "gpt-5" in model_name:
219
178
  tool_names: list[str] = [tools.BASH, tools.READ, tools.APPLY_PATCH, tools.UPDATE_PLAN]
220
- elif "gemini-3" in model_name:
221
- tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE]
222
179
  else:
223
180
  tool_names = [tools.BASH, tools.READ, tools.EDIT, tools.WRITE, tools.TODO_WRITE]
224
181
 
182
+ tool_names.append(tools.TASK)
225
183
  if config is not None:
226
184
  helper = SubAgentModelHelper(config)
227
- tool_names.extend(helper.get_enabled_sub_agent_tool_names())
185
+ if helper.check_availability_requirement(AVAILABILITY_IMAGE_MODEL):
186
+ tool_names.append(tools.IMAGE_GEN)
228
187
  else:
229
- from klaude_code.protocol.sub_agent import sub_agent_tool_names
230
-
231
- tool_names.extend(sub_agent_tool_names(enabled_only=True))
188
+ tool_names.append(tools.IMAGE_GEN)
232
189
 
233
- tool_names.extend([tools.MERMAID])
234
- # tool_names.extend([tools.MEMORY])
190
+ tool_names.append(tools.MERMAID)
235
191
  return get_tool_schemas(tool_names)
236
192
 
237
193
 
@@ -258,7 +214,6 @@ def load_agent_reminders(
258
214
  memory_reminder,
259
215
  at_file_reader_reminder,
260
216
  last_path_memory_reminder,
261
- file_changed_externally_reminder,
262
217
  image_reminder,
263
218
  skill_reminder,
264
219
  ]
@@ -24,7 +24,3 @@ def is_context_overflow(error_message: str | None) -> bool:
24
24
  if _STATUS_CODE_PATTERN.search(error_message):
25
25
  return True
26
26
  return any(pattern.search(error_message) for pattern in _OVERFLOW_PATTERNS)
27
-
28
-
29
- def get_overflow_patterns() -> list[re.Pattern[str]]:
30
- return list(_OVERFLOW_PATTERNS)