klaude-code 2.5.3__py3-none-any.whl → 2.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. klaude_code/auth/__init__.py +10 -0
  2. klaude_code/auth/env.py +77 -0
  3. klaude_code/cli/auth_cmd.py +87 -8
  4. klaude_code/cli/config_cmd.py +5 -5
  5. klaude_code/cli/cost_cmd.py +159 -60
  6. klaude_code/cli/main.py +52 -61
  7. klaude_code/cli/self_update.py +7 -7
  8. klaude_code/config/builtin_config.py +23 -9
  9. klaude_code/config/config.py +19 -9
  10. klaude_code/core/turn.py +7 -8
  11. klaude_code/llm/google/client.py +12 -0
  12. klaude_code/llm/openai_compatible/stream.py +5 -1
  13. klaude_code/llm/openrouter/client.py +1 -0
  14. klaude_code/protocol/events.py +214 -0
  15. klaude_code/protocol/sub_agent/image_gen.py +0 -4
  16. klaude_code/session/session.py +51 -18
  17. klaude_code/tui/commands.py +0 -5
  18. klaude_code/tui/components/metadata.py +4 -5
  19. klaude_code/tui/components/sub_agent.py +6 -0
  20. klaude_code/tui/display.py +11 -1
  21. klaude_code/tui/input/completers.py +11 -7
  22. klaude_code/tui/machine.py +89 -55
  23. klaude_code/tui/renderer.py +1 -62
  24. {klaude_code-2.5.3.dist-info → klaude_code-2.6.0.dist-info}/METADATA +23 -31
  25. {klaude_code-2.5.3.dist-info → klaude_code-2.6.0.dist-info}/RECORD +27 -34
  26. klaude_code/cli/session_cmd.py +0 -87
  27. klaude_code/protocol/events/__init__.py +0 -63
  28. klaude_code/protocol/events/base.py +0 -18
  29. klaude_code/protocol/events/chat.py +0 -30
  30. klaude_code/protocol/events/lifecycle.py +0 -23
  31. klaude_code/protocol/events/metadata.py +0 -16
  32. klaude_code/protocol/events/streaming.py +0 -43
  33. klaude_code/protocol/events/system.py +0 -56
  34. klaude_code/protocol/events/tools.py +0 -27
  35. {klaude_code-2.5.3.dist-info → klaude_code-2.6.0.dist-info}/WHEEL +0 -0
  36. {klaude_code-2.5.3.dist-info → klaude_code-2.6.0.dist-info}/entry_points.txt +0 -0
klaude_code/cli/main.py CHANGED
@@ -7,52 +7,38 @@ from klaude_code.cli.auth_cmd import register_auth_commands
 from klaude_code.cli.config_cmd import register_config_commands
 from klaude_code.cli.cost_cmd import register_cost_commands
 from klaude_code.cli.debug import DEBUG_FILTER_HELP, prepare_debug_logging
-from klaude_code.cli.self_update import register_self_update_commands, version_option_callback
-from klaude_code.cli.session_cmd import register_session_commands
+from klaude_code.cli.self_update import register_self_upgrade_commands, version_option_callback
 from klaude_code.session import Session
 from klaude_code.tui.command.resume_cmd import select_session_sync
 from klaude_code.ui.terminal.title import update_terminal_title
 
-ENV_HELP_LINES = [
-    "Environment Variables:",
-    "",
-    "Provider API keys (built-in config):",
-    "  ANTHROPIC_API_KEY              Anthropic API key",
-    "  OPENAI_API_KEY                 OpenAI API key",
-    "  OPENROUTER_API_KEY             OpenRouter API key",
-    "  GOOGLE_API_KEY                 Google API key (Gemini)",
-    "  DEEPSEEK_API_KEY               DeepSeek API key",
-    "  MOONSHOT_API_KEY               Moonshot API key (Kimi)",
-    "",
-    "AWS credentials (Bedrock):",
-    "  AWS_ACCESS_KEY_ID              AWS access key id",
-    "  AWS_SECRET_ACCESS_KEY          AWS secret access key",
-    "  AWS_REGION                     AWS region",
-    "",
-    "Tool limits (Read):",
-    "  KLAUDE_READ_GLOBAL_LINE_CAP    Max lines to read (default: 2000)",
-    "  KLAUDE_READ_MAX_CHARS          Max total chars to read (default: 50000)",
-    "  KLAUDE_READ_MAX_IMAGE_BYTES    Max image bytes to read (default: 4MB)",
-    "  KLAUDE_IMAGE_OUTPUT_MAX_BYTES  Max decoded image bytes (default: 64MB)",
-    "",
-    "Notifications / testing:",
-    "  KLAUDE_NOTIFY                  Set to 0/off/false/disable(d) to disable task notifications",
-    "  KLAUDE_TEST_SIGNAL             In tmux, emit `tmux wait-for -S <channel>` on task completion",
-    "  TMUX                           Auto-detected; required for KLAUDE_TEST_SIGNAL",
-    "",
-    "Editor / terminal integration:",
-    "  EDITOR                         Preferred editor for `klaude config`",
-    "  TERM                           Terminal identification (auto-detected)",
-    "  TERM_PROGRAM                   Terminal identification (auto-detected)",
-    "  WT_SESSION                     Terminal hint (auto-detected)",
-    "  VTE_VERSION                    Terminal hint (auto-detected)",
-    "  GHOSTTY_RESOURCES_DIR          Ghostty detection (auto-detected)",
-    "",
-    "Compatibility:",
-    "  ANTHROPIC_AUTH_TOKEN           Reserved by anthropic SDK; temporarily unset during client init",
-]
-
-ENV_HELP = "\n\n".join(ENV_HELP_LINES)
+
+def _build_env_help() -> str:
+    from klaude_code.config.builtin_config import SUPPORTED_API_KEYS
+
+    lines = [
+        "Environment Variables:",
+        "",
+        "Provider API keys (built-in config):",
+    ]
+    # Calculate max env_var length for alignment
+    max_len = max(len(k.env_var) for k in SUPPORTED_API_KEYS)
+    for k in SUPPORTED_API_KEYS:
+        lines.append(f"  {k.env_var:<{max_len}}  {k.description}")
+    lines.extend(
+        [
+            "",
+            "Tool limits (Read):",
+            "  KLAUDE_READ_GLOBAL_LINE_CAP    Max lines to read (default: 2000)",
+            "  KLAUDE_READ_MAX_CHARS          Max total chars to read (default: 50000)",
+            "  KLAUDE_READ_MAX_IMAGE_BYTES    Max image bytes to read (default: 4MB)",
+            "  KLAUDE_IMAGE_OUTPUT_MAX_BYTES  Max decoded image bytes (default: 64MB)",
+        ]
+    )
+    return "\n\n".join(lines)
+
+
+ENV_HELP = _build_env_help()
 
 
 app = typer.Typer(
     add_completion=False,
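
The env-var help is now generated from SUPPORTED_API_KEYS instead of a hard-coded list, using a computed column width for alignment. A minimal sketch of that alignment idiom, assuming only the ApiKeyInfo fields visible in this diff (the two entries below are illustrative, not the full built-in tuple):

from dataclasses import dataclass

@dataclass(frozen=True)
class ApiKeyInfo:
    env_var: str
    name: str
    description: str

keys = (
    ApiKeyInfo("ANTHROPIC_API_KEY", "Anthropic", "Anthropic API key"),
    ApiKeyInfo("GOOGLE_API_KEY", "Google Gemini", "Google API key (Gemini)"),
)

# Pad each env var name to the longest one so the descriptions line up.
width = max(len(k.env_var) for k in keys)
for k in keys:
    print(f"  {k.env_var:<{width}}  {k.description}")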
@@ -60,55 +46,51 @@ app = typer.Typer(
     no_args_is_help=False,
     rich_markup_mode="rich",
     epilog=ENV_HELP,
+    context_settings={"help_option_names": ["-h", "--help"]},
 )
 
 # Register subcommands from modules
-register_session_commands(app)
 register_auth_commands(app)
 register_config_commands(app)
 register_cost_commands(app)
+register_self_upgrade_commands(app)
+
 
-register_self_update_commands(app)
+@app.command("help", hidden=True)
+def help_command(ctx: typer.Context) -> None:
+    """Show help message."""
+    print(ctx.parent.get_help() if ctx.parent else ctx.get_help())
 
 
 @app.callback(invoke_without_command=True)
 def main_callback(
     ctx: typer.Context,
-    version: bool = typer.Option(
-        False,
-        "--version",
-        "-V",
-        "-v",
-        help="Show version and exit",
-        callback=version_option_callback,
-        is_eager=True,
-    ),
     model: str | None = typer.Option(
         None,
         "--model",
         "-m",
-        help="Override model config name (uses main model by default)",
+        help="Select model by name",
         rich_help_panel="LLM",
     ),
-    continue_: bool = typer.Option(False, "--continue", "-c", help="Continue from latest session"),
-    resume: bool = typer.Option(False, "--resume", "-r", help="Select a session to resume for this project"),
+    continue_: bool = typer.Option(False, "--continue", "-c", help="Resume latest session"),
+    resume: bool = typer.Option(False, "--resume", "-r", help="Pick a session to resume"),
     resume_by_id: str | None = typer.Option(
         None,
         "--resume-by-id",
-        help="Resume a session by its ID (must exist)",
+        help="Resume session by ID",
    ),
     select_model: bool = typer.Option(
         False,
         "--select-model",
         "-s",
-        help="Interactively choose a model at startup",
+        help="Choose model interactively",
         rich_help_panel="LLM",
     ),
     debug: bool = typer.Option(
         False,
         "--debug",
         "-d",
-        help="Enable debug mode",
+        help="Enable debug logging",
         rich_help_panel="Debug",
     ),
     debug_filter: str | None = typer.Option(
@@ -120,14 +102,23 @@ def main_callback(
     vanilla: bool = typer.Option(
         False,
         "--vanilla",
-        help="Vanilla mode exposes the model's raw API behavior: it provides only minimal tools (Bash, Read, Write & Edit) and omits system prompts and reminders.",
+        help="Minimal mode: basic tools only, no system prompts",
     ),
     banana: bool = typer.Option(
         False,
         "--banana",
-        help="Image generation mode with Nano Banana",
+        help="Image generation mode",
         rich_help_panel="LLM",
     ),
+    version: bool = typer.Option(
+        False,
+        "--version",
+        "-V",
+        "-v",
+        help="Show version and exit",
+        callback=version_option_callback,
+        is_eager=True,
+    ),
 ) -> None:
     # Only run interactive mode when no subcommand is invoked
     if ctx.invoked_subcommand is None:
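
Taken together, the context_settings addition and the hidden help command make `-h` and `klaude help` behave like `--help`. A self-contained sketch of the same pattern under stock Typer/Click behavior (the app and commands below are hypothetical, not klaude-code's):

import typer

app = typer.Typer(
    # Click context setting: accept -h in addition to --help.
    context_settings={"help_option_names": ["-h", "--help"]},
)

@app.command("help", hidden=True)
def help_command(ctx: typer.Context) -> None:
    """Show help message."""
    # When invoked as a subcommand, print the parent (top-level) help.
    print(ctx.parent.get_help() if ctx.parent else ctx.get_help())

@app.command()
def hello(name: str = "world") -> None:
    typer.echo(f"hello {name}")

if __name__ == "__main__":
    app()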
klaude_code/cli/self_update.py CHANGED
@@ -35,14 +35,14 @@ def version_command() -> None:
     _print_version()
 
 
-def update_command(
+def upgrade_command(
     check: bool = typer.Option(
         False,
         "--check",
-        help="Check for updates and exit without upgrading",
+        help="Check only, don't upgrade",
     ),
 ) -> None:
-    """Upgrade klaude-code when installed via `uv tool`."""
+    """Upgrade to latest version"""
 
     info = check_for_updates_blocking()
 
@@ -79,9 +79,9 @@ def update_command(
     log("Update complete. Please re-run `klaude` to use the new version.")
 
 
-def register_self_update_commands(app: typer.Typer) -> None:
+def register_self_upgrade_commands(app: typer.Typer) -> None:
     """Register self-update and version subcommands to the given Typer app."""
 
-    app.command("update")(update_command)
-    app.command("upgrade", help="Alias for `klaude update`.")(update_command)
-    app.command("version", help="Alias for `klaude --version`.")(version_command)
+    app.command("upgrade")(upgrade_command)
+    app.command("update", hidden=True)(upgrade_command)
+    app.command("version", hidden=True)(version_command)
klaude_code/config/builtin_config.py CHANGED
@@ -5,6 +5,7 @@ environment variables (ANTHROPIC_API_KEY, OPENAI_API_KEY, etc.) without
 manually configuring providers.
 """
 
+from dataclasses import dataclass
 from functools import lru_cache
 from importlib import resources
 from typing import TYPE_CHECKING, Any
@@ -14,15 +15,28 @@ import yaml
 if TYPE_CHECKING:
     from klaude_code.config.config import ProviderConfig
 
-# All supported API key environment variables
-SUPPORTED_API_KEY_ENVS = [
-    "ANTHROPIC_API_KEY",
-    "GOOGLE_API_KEY",
-    "OPENAI_API_KEY",
-    "OPENROUTER_API_KEY",
-    "DEEPSEEK_API_KEY",
-    "MOONSHOT_API_KEY",
-]
+
+@dataclass(frozen=True)
+class ApiKeyInfo:
+    """Information about a supported API key."""
+
+    env_var: str
+    name: str
+    description: str
+
+
+# All supported API keys with their metadata
+SUPPORTED_API_KEYS: tuple[ApiKeyInfo, ...] = (
+    ApiKeyInfo("ANTHROPIC_API_KEY", "Anthropic", "Anthropic API key"),
+    ApiKeyInfo("OPENAI_API_KEY", "OpenAI", "OpenAI API key"),
+    ApiKeyInfo("OPENROUTER_API_KEY", "OpenRouter", "OpenRouter API key"),
+    ApiKeyInfo("GOOGLE_API_KEY", "Google Gemini", "Google API key (Gemini)"),
+    ApiKeyInfo("DEEPSEEK_API_KEY", "DeepSeek", "DeepSeek API key"),
+    ApiKeyInfo("MOONSHOT_API_KEY", "Moonshot Kimi", "Moonshot API key (Kimi)"),
+)
+
+# For backwards compatibility
+SUPPORTED_API_KEY_ENVS = [k.env_var for k in SUPPORTED_API_KEYS]
 
 
 @lru_cache(maxsize=1)
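
SUPPORTED_API_KEY_ENVS is kept as a list derived from SUPPORTED_API_KEYS, so existing callers that only need env var names keep working while new code can use the richer records. A hedged sketch of consuming both (the by_env_var index is a hypothetical helper, not part of the package):

import os

from klaude_code.config.builtin_config import SUPPORTED_API_KEY_ENVS, SUPPORTED_API_KEYS

# Legacy-style check against the derived list of env var names.
set_envs = [env for env in SUPPORTED_API_KEY_ENVS if os.environ.get(env)]

# Hypothetical helper: index the ApiKeyInfo records by env var name.
by_env_var = {k.env_var: k for k in SUPPORTED_API_KEYS}
info = by_env_var.get("OPENROUTER_API_KEY")
if info is not None:
    print(f"{info.name}: {info.description}")  # e.g. "OpenRouter: OpenRouter API key"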
klaude_code/config/config.py CHANGED
@@ -8,8 +8,9 @@ from typing import Any, cast
 import yaml
 from pydantic import BaseModel, Field, ValidationError, model_validator
 
+from klaude_code.auth.env import get_auth_env
 from klaude_code.config.builtin_config import (
-    SUPPORTED_API_KEY_ENVS,
+    SUPPORTED_API_KEYS,
     get_builtin_provider_configs,
     get_builtin_sub_agent_models,
 )
@@ -26,7 +27,8 @@ def parse_env_var_syntax(value: str | None) -> tuple[str | None, str | None]:
 
     Returns:
         A tuple of (env_var_name, resolved_value).
-        - If value uses ${ENV_VAR} syntax: (env_var_name, os.environ.get(env_var_name))
+        - If value uses ${ENV_VAR} syntax: (env_var_name, resolved_value)
+          Priority: os.environ > klaude-auth.json env section
         - If value is a plain string: (None, value)
         - If value is None: (None, None)
     """
@@ -36,7 +38,9 @@ def parse_env_var_syntax(value: str | None) -> tuple[str | None, str | None]:
     match = _ENV_VAR_PATTERN.match(value)
     if match:
         env_var_name = match.group(1)
-        return env_var_name, os.environ.get(env_var_name)
+        # Priority: real env var > auth.json env section
+        resolved = os.environ.get(env_var_name) or get_auth_env(env_var_name)
+        return env_var_name, resolved
 
     return None, value
 
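With this change, a ${ENV_VAR} reference in a provider config resolves from the real environment first and falls back to the klaude-auth.json env section via get_auth_env. A rough sketch of the intended behavior, with made-up values (only the docstring contract above is assumed):

import os

from klaude_code.config.config import parse_env_var_syntax

# Plain strings pass through unchanged.
assert parse_env_var_syntax("sk-literal-key") == (None, "sk-literal-key")

# ${ENV_VAR} syntax: a real environment variable takes priority.
os.environ["OPENAI_API_KEY"] = "sk-from-env"
assert parse_env_var_syntax("${OPENAI_API_KEY}") == ("OPENAI_API_KEY", "sk-from-env")

# If OPENAI_API_KEY were unset, the value would instead come from the
# klaude-auth.json env section through get_auth_env(), or be None if absent.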
 
@@ -613,17 +617,23 @@ def load_config() -> Config:
 
 def print_no_available_models_hint() -> None:
     """Print helpful message when no models are available due to missing API keys."""
-    log("No available models. Please set one of the following environment variables:", style="yellow")
+    log("No available models. Configure an API key using one of these methods:", style="yellow")
     log("")
-    for env_var in SUPPORTED_API_KEY_ENVS:
-        current_value = os.environ.get(env_var)
+    log("Option 1: Use klaude auth login", style="bold")
+    # Use first word of name for brevity
+    names = [k.name.split()[0].lower() for k in SUPPORTED_API_KEYS]
+    log(f"  klaude auth login <provider>   (providers: {', '.join(names)})", style="dim")
+    log("")
+    log("Option 2: Set environment variables", style="bold")
+    max_len = max(len(k.env_var) for k in SUPPORTED_API_KEYS)
+    for key_info in SUPPORTED_API_KEYS:
+        current_value = os.environ.get(key_info.env_var) or get_auth_env(key_info.env_var)
         if current_value:
-            log(f"  {env_var} = (set)", style="green")
+            log(f"  {key_info.env_var:<{max_len}}  (set)", style="green")
         else:
-            log(f"  export {env_var}=<your-api-key>", style="dim")
+            log(f"  {key_info.env_var:<{max_len}}  {key_info.description}", style="dim")
     log("")
     log(f"Or add custom providers in: {config_path}", style="dim")
-    log(f"See example config: {example_config_path}", style="dim")
 
 
 # Expose cache control for tests and callers that need to invalidate the cache.
klaude_code/core/turn.py CHANGED
@@ -25,10 +25,12 @@ from klaude_code.log import DebugType, log_debug
 from klaude_code.protocol import events, llm_param, message, model, tools
 
 # Protocols that support prefill (continuing from partial assistant message)
-_PREFILL_SUPPORTED_PROTOCOLS = frozenset({
-    "anthropic",
-    "claude_oauth",
-})
+_PREFILL_SUPPORTED_PROTOCOLS = frozenset(
+    {
+        "anthropic",
+        "claude_oauth",
+    }
+)
 
 
 class TurnError(Exception):
@@ -183,6 +185,7 @@ class TurnExecutor:
 
         if self._turn_result.stream_error is not None:
             # Save accumulated content for potential prefill on retry (only for supported protocols)
+            session_ctx.append_history([self._turn_result.stream_error])
             protocol = ctx.llm_client.get_llm_config().protocol
             supports_prefill = protocol.value in _PREFILL_SUPPORTED_PROTOCOLS
             if (
@@ -194,7 +197,6 @@ class TurnExecutor:
                 session_ctx.append_history([self._turn_result.assistant_message])
                 # Add continuation prompt to avoid Anthropic thinking block requirement
                 session_ctx.append_history([message.UserMessage(parts=[message.TextPart(text="continue")])])
-                session_ctx.append_history([self._turn_result.stream_error])
             yield events.TurnEndEvent(session_id=session_ctx.session_id)
             raise TurnError(self._turn_result.stream_error.error)
 
@@ -249,9 +251,6 @@ class TurnExecutor:
         image_size = generation.get("image_size")
         if image_size in SUPPORTED_IMAGE_SIZES:
             image_config.image_size = image_size
-        extra = generation.get("extra")
-        if isinstance(extra, dict) and extra:
-            image_config.extra = extra
         if image_config.model_dump(exclude_none=True):
             call_param.image_config = image_config
 
klaude_code/llm/google/client.py CHANGED
@@ -25,6 +25,9 @@ from google.genai.types import (
     ThinkingLevel,
     ToolConfig,
 )
+from google.genai.types import (
+    ImageConfig as GoogleImageConfig,
+)
 
 from klaude_code.llm.client import LLMClientABC, LLMStreamABC
 from klaude_code.llm.google.input import convert_history_to_contents, convert_tool_schema
@@ -91,6 +94,14 @@ def _build_config(param: llm_param.LLMCallParameter) -> GenerateContentConfig:
     if param.thinking.reasoning_effort:
         thinking_config.thinking_level = convert_gemini_thinking_level(param.thinking.reasoning_effort)
 
+    # ImageGen per-call overrides
+    image_config: GoogleImageConfig | None = None
+    if param.image_config is not None:
+        image_config = GoogleImageConfig(
+            aspect_ratio=param.image_config.aspect_ratio,
+            image_size=param.image_config.image_size,
+        )
+
     return GenerateContentConfig(
         system_instruction=param.system,
         temperature=param.temperature,
@@ -98,6 +109,7 @@ def _build_config(param: llm_param.LLMCallParameter) -> GenerateContentConfig:
         tools=cast(Any, tool_list) if tool_list else None,
         tool_config=tool_config,
         thinking_config=thinking_config,
+        image_config=image_config,
     )
 
 
klaude_code/llm/openai_compatible/stream.py CHANGED
@@ -199,6 +199,7 @@ async def parse_chat_completions_stream(
     metadata_tracker: MetadataTracker,
     reasoning_handler: ReasoningHandlerABC,
     on_event: Callable[[object], None] | None = None,
+    provider_prefix: str = "",
 ) -> AsyncGenerator[message.LLMStreamItem]:
     """Parse OpenAI Chat Completions stream into stream items.
 
@@ -235,7 +236,7 @@ async def parse_chat_completions_stream(
         if event_model := getattr(event, "model", None):
             metadata_tracker.set_model_name(str(event_model))
         if provider := getattr(event, "provider", None):
-            metadata_tracker.set_provider(str(provider))
+            metadata_tracker.set_provider(f"{provider_prefix}{provider}")
 
         choices = cast(Any, getattr(event, "choices", None))
         if not choices:
@@ -364,12 +365,14 @@ class OpenAILLMStream(LLMStreamABC):
         metadata_tracker: MetadataTracker,
         reasoning_handler: ReasoningHandlerABC,
         on_event: Callable[[object], None] | None = None,
+        provider_prefix: str = "",
     ) -> None:
         self._stream = stream
         self._param = param
         self._metadata_tracker = metadata_tracker
         self._reasoning_handler = reasoning_handler
         self._on_event = on_event
+        self._provider_prefix = provider_prefix
         self._state = StreamStateManager(
             param_model=str(param.model_id),
         )
@@ -386,6 +389,7 @@ class OpenAILLMStream(LLMStreamABC):
             metadata_tracker=self._metadata_tracker,
             reasoning_handler=self._reasoning_handler,
             on_event=self._on_event,
+            provider_prefix=self._provider_prefix,
         ):
             if isinstance(item, message.AssistantMessage):
                 self._completed = True
klaude_code/llm/openrouter/client.py CHANGED
@@ -145,4 +145,5 @@ class OpenRouterClient(LLMClientABC):
             metadata_tracker=metadata_tracker,
             reasoning_handler=reasoning_handler,
             on_event=on_event,
+            provider_prefix="openrouter/",
         )
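
The provider_prefix threads from OpenRouterClient through OpenAILLMStream into parse_chat_completions_stream, so provider names reported by OpenRouter are namespaced in metadata. A tiny sketch of the effect using a stand-in tracker (the real MetadataTracker is not shown in this diff):

class FakeTracker:
    """Stand-in for MetadataTracker, only to illustrate set_provider."""

    provider: str | None = None

    def set_provider(self, provider: str) -> None:
        self.provider = provider

tracker = FakeTracker()
provider_prefix = "openrouter/"  # value passed by OpenRouterClient in this diff
provider = "anthropic"           # example upstream provider reported on a chunk

tracker.set_provider(f"{provider_prefix}{provider}")
assert tracker.provider == "openrouter/anthropic"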
klaude_code/protocol/events.py ADDED
@@ -0,0 +1,214 @@
+from __future__ import annotations
+
+import time
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from klaude_code.protocol import llm_param, message, model
+from klaude_code.protocol.commands import CommandName
+
+__all__ = [
+    "AssistantImageDeltaEvent",
+    "AssistantTextDeltaEvent",
+    "AssistantTextEndEvent",
+    "AssistantTextStartEvent",
+    "CommandOutputEvent",
+    "DeveloperMessageEvent",
+    "EndEvent",
+    "ErrorEvent",
+    "Event",
+    "InterruptEvent",
+    "ReplayEventUnion",
+    "ReplayHistoryEvent",
+    "ResponseCompleteEvent",
+    "ResponseEvent",
+    "TaskFinishEvent",
+    "TaskMetadataEvent",
+    "TaskStartEvent",
+    "ThinkingDeltaEvent",
+    "ThinkingEndEvent",
+    "ThinkingStartEvent",
+    "TodoChangeEvent",
+    "ToolCallEvent",
+    "ToolCallStartEvent",
+    "ToolResultEvent",
+    "TurnEndEvent",
+    "TurnStartEvent",
+    "UsageEvent",
+    "UserMessageEvent",
+    "WelcomeEvent",
+]
+
+
+class Event(BaseModel):
+    """Base event."""
+
+    session_id: str
+    timestamp: float = Field(default_factory=time.time)
+
+
+class ResponseEvent(Event):
+    """Event associated with a single model response."""
+
+    response_id: str | None = None
+
+
+class UserMessageEvent(Event):
+    content: str
+    images: list[message.ImageURLPart] | None = None
+
+
+class DeveloperMessageEvent(Event):
+    """DeveloperMessages are reminders in user messages or tool results."""
+
+    item: message.DeveloperMessage
+
+
+class TodoChangeEvent(Event):
+    todos: list[model.TodoItem]
+
+
+class CommandOutputEvent(Event):
+    """Event for command output display. Not persisted to session history."""
+
+    command_name: CommandName | str
+    content: str = ""
+    ui_extra: model.ToolResultUIExtra | None = None
+    is_error: bool = False
+
+
+class TaskStartEvent(Event):
+    sub_agent_state: model.SubAgentState | None = None
+    model_id: str | None = None
+
+
+class TaskFinishEvent(Event):
+    task_result: str
+    has_structured_output: bool = False
+
+
+class TurnStartEvent(Event):
+    pass
+
+
+class TurnEndEvent(Event):
+    pass
+
+
+class UsageEvent(ResponseEvent):
+    usage: model.Usage
+
+
+class TaskMetadataEvent(Event):
+    metadata: model.TaskMetadataItem
+    cancelled: bool = False
+
+
+class ThinkingStartEvent(ResponseEvent):
+    pass
+
+
+class ThinkingDeltaEvent(ResponseEvent):
+    content: str
+
+
+class ThinkingEndEvent(ResponseEvent):
+    pass
+
+
+class AssistantTextStartEvent(ResponseEvent):
+    pass
+
+
+class AssistantTextDeltaEvent(ResponseEvent):
+    content: str
+
+
+class AssistantTextEndEvent(ResponseEvent):
+    pass
+
+
+class AssistantImageDeltaEvent(ResponseEvent):
+    file_path: str
+
+
+class ToolCallStartEvent(ResponseEvent):
+    tool_call_id: str
+    tool_name: str
+
+
+class ResponseCompleteEvent(ResponseEvent):
+    """Final snapshot of the model response."""
+
+    content: str
+    thinking_text: str | None = None
+
+
+class WelcomeEvent(Event):
+    work_dir: str
+    llm_config: llm_param.LLMConfigParameter
+    show_klaude_code_info: bool = True
+    loaded_skills: dict[str, list[str]] = Field(default_factory=dict)
+
+
+class ErrorEvent(Event):
+    error_message: str
+    can_retry: bool = False
+
+
+class InterruptEvent(Event):
+    pass
+
+
+class EndEvent(Event):
+    """Global display shutdown."""
+
+    session_id: str = "__app__"
+
+
+type ReplayEventUnion = (
+    TaskStartEvent
+    | TaskFinishEvent
+    | TurnStartEvent
+    | ThinkingStartEvent
+    | ThinkingDeltaEvent
+    | ThinkingEndEvent
+    | AssistantTextStartEvent
+    | AssistantTextDeltaEvent
+    | AssistantTextEndEvent
+    | AssistantImageDeltaEvent
+    | ToolCallEvent
+    | ToolResultEvent
+    | UserMessageEvent
+    | TaskMetadataEvent
+    | InterruptEvent
+    | DeveloperMessageEvent
+    | ErrorEvent
+)
+
+
+class ReplayHistoryEvent(Event):
+    events: list[ReplayEventUnion]
+    updated_at: float
+    is_load: bool = True
+
+
+class ToolCallEvent(ResponseEvent):
+    tool_call_id: str
+    tool_name: str
+    arguments: str
+
+
+class ToolResultEvent(ResponseEvent):
+    tool_call_id: str
+    tool_name: str
+    result: str
+    ui_extra: model.ToolResultUIExtra | None = None
+    status: Literal["success", "error", "aborted"]
+    task_metadata: model.TaskMetadata | None = None
+    is_last_in_turn: bool = True
+
+    @property
+    def is_error(self) -> bool:
+        return self.status in ("error", "aborted")
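
The event protocol, previously spread across klaude_code/protocol/events/*, is consolidated into this single module. A short hedged sketch of typical usage of these pydantic models (all field values below are invented for illustration):

from klaude_code.protocol import events

result = events.ToolResultEvent(
    session_id="session-123",
    tool_call_id="call_1",
    tool_name="Read",
    result="file contents...",
    status="error",
)

assert result.is_error  # "error" and "aborted" both count as errors
payload = result.model_dump_json()  # pydantic v2 serialization
restored = events.ToolResultEvent.model_validate_json(payload)
assert restored.status == "error" and restored.is_last_in_turn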
klaude_code/protocol/sub_agent/image_gen.py CHANGED
@@ -66,10 +66,6 @@ IMAGE_GEN_PARAMETERS: dict[str, Any] = {
             "enum": ["1K", "2K", "4K"],
             "description": "Output size for Nano Banana Pro (must use uppercase K).",
         },
-        "extra": {
-            "type": "object",
-            "description": "Provider/model-specific extra parameters (future-proofing).",
-        },
     },
     "additionalProperties": False,
 },